@@ -25,34 +25,22 @@ import os.path
 import basic.program
 import utils.file_tool
 import basic.constants as B
+import utils.data_const as D

-DATA_SRC_DIR = "dir"
-DATA_SRC_CSV = "csv"
-
-CSV_HEADER_START = ["node", "table", "tabelle"]
-CSV_DELIMITER = ";"
-
-CSV_SPECTYPE_DATA = "data"
-CSV_SPECTYPE_TREE = "tree"
-CSV_SPECTYPE_KEYS = "keys"
-CSV_SPECTYPE_CONF = "conf"
-CSV_NODETYPE_KEYS = "_keys"
-
-ATTR_SRC_TYPE = "tdtyp"
-ATTR_SRC_DATA = "tdsrc"
-ATTR_SRC_NAME = "tdname"
+TOOL_NAME = "tdata_tool"
+""" name of the tool in order to switch debug-info on """

 def getTdataAttr():
     job = basic.program.Job.getInstance()
     out = {} #
-    out[ATTR_SRC_TYPE] = DATA_SRC_DIR
+    out[D.ATTR_SRC_TYPE] = D.DATA_SRC_DIR
     print("---getTdataAttr")
     print(vars(job.par))
     if hasattr(job.par, B.PAR_TESTCASE):
-        out[ATTR_SRC_NAME] = getattr(job.par, B.PAR_TESTCASE)
+        out[D.ATTR_SRC_NAME] = getattr(job.par, B.PAR_TESTCASE)
     elif hasattr(job.par, B.PAR_TESTSUITE):
-        out[ATTR_SRC_NAME] = getattr(job.par, B.PAR_TESTSUITE)
-    for p in [ATTR_SRC_TYPE, ATTR_SRC_DATA, ATTR_SRC_NAME]:
+        out[D.ATTR_SRC_NAME] = getattr(job.par, B.PAR_TESTSUITE)
+    for p in [D.ATTR_SRC_TYPE, D.ATTR_SRC_DATA, D.ATTR_SRC_NAME]:
         # out[p] = ""
         if hasattr(job.par, p):
             out[p] = getattr(job.par, p)
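The new `import utils.data_const as D` replaces the module-local constants deleted above. For reference, a minimal sketch of what utils/data_const.py presumably now contains, assuming the removed definitions were carried over unchanged:

    # utils/data_const.py -- hypothetical sketch; names and values are
    # exactly the definitions removed from tdata_tool above
    DATA_SRC_DIR = "dir"
    DATA_SRC_CSV = "csv"

    CSV_HEADER_START = ["node", "table", "tabelle"]
    CSV_DELIMITER = ";"

    CSV_SPECTYPE_DATA = "data"
    CSV_SPECTYPE_TREE = "tree"
    CSV_SPECTYPE_KEYS = "keys"
    CSV_SPECTYPE_CONF = "conf"
    CSV_NODETYPE_KEYS = "_keys"

    ATTR_SRC_TYPE = "tdtyp"
    ATTR_SRC_DATA = "tdsrc"
    ATTR_SRC_NAME = "tdname"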
@@ -73,24 +61,24 @@ def getTestdata():
     #criteria = getattr(job.par, "tdname")
     tdata = getTdataAttr() # {"reftyp": reftyp, "source": source, "criteria": criteria}
     print(tdata)
-    if tdata[ATTR_SRC_TYPE] == "flaskdb":
+    if tdata[D.ATTR_SRC_TYPE] == "flaskdb":
         # read data-structure with sourcename
         # connect to source
         # select with all data with datastructure
-        job.m.setInfo("Test-Data readed from " + tdata[ATTR_SRC_TYPE] + " for " + tdata[ATTR_SRC_NAME])
-    elif tdata[ATTR_SRC_TYPE] == DATA_SRC_CSV:
+        job.m.setInfo("Test-Data read from " + tdata[D.ATTR_SRC_TYPE] + " for " + tdata[D.ATTR_SRC_NAME])
+    elif tdata[D.ATTR_SRC_TYPE] == D.DATA_SRC_CSV:
         # read file in testdata
-        job.m.logInfo("Test-Data readed from " + tdata[ATTR_SRC_TYPE] + " for " + tdata[ATTR_SRC_NAME])
-    elif tdata[ATTR_SRC_TYPE] == DATA_SRC_DIR:
-        filename = os.path.join(job.conf.getJobConf(B.SUBJECT_PATH+":"+B.ATTR_PATH_TDATA), tdata[ATTR_SRC_NAME], "testspec.csv")
-        data = getCsvSpec(job.m, filename, CSV_SPECTYPE_DATA)
+        job.m.logInfo("Test-Data read from " + tdata[D.ATTR_SRC_TYPE] + " for " + tdata[D.ATTR_SRC_NAME])
+    elif tdata[D.ATTR_SRC_TYPE] == D.DATA_SRC_DIR:
+        filename = os.path.join(job.conf.getJobConf(B.SUBJECT_PATH+":"+B.ATTR_PATH_TDATA), tdata[D.ATTR_SRC_NAME], "testspec.csv")
+        data = getCsvSpec(job.m, filename, D.CSV_SPECTYPE_DATA)
         for k in data:
             tdata[k] = data[k]
             if (k == "option"):
                 for p in data[k]:
                     setattr(job.par, p, data[k][p])
     else:
-        job.m.setFatal("test-Data: reftyp " + tdata[ATTR_SRC_TYPE] + " is not implemented")
+        job.m.setFatal("test-Data: reftyp " + tdata[D.ATTR_SRC_TYPE] + " is not implemented")
     return tdata


 def getCsvSpec(msg, filename, type):
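With the constants above, the attribute dictionary that getTdataAttr() builds and getTestdata() dispatches on looks roughly like this; a sketch, assuming a testcase run with the default "dir" source (the name "TC0001" is invented for illustration):

    # hypothetical return value of getTdataAttr()
    tdata = {
        "tdtyp": "dir",      # D.ATTR_SRC_TYPE, preset to D.DATA_SRC_DIR
        "tdname": "TC0001",  # D.ATTR_SRC_NAME, taken from job.par
    }

getTestdata() then selects the flaskdb, csv, or dir branch from the "tdtyp" entry.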
@@ -120,9 +108,9 @@ def getCsvSpec(msg, filename, type):
     tableDict = {}
     for l in lines:
         print("lines "+l)
-        fields = l.split(CSV_DELIMITER)
+        fields = l.split(D.CSV_DELIMITER)
         # check empty line, comment
-        if (len(l.strip().replace(CSV_DELIMITER,"")) < 1):
+        if (len(l.strip().replace(D.CSV_DELIMITER,"")) < 1):
             status = "start"
             continue
         if (fields[0][0:1] == "#"):
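Note that the emptiness check strips the delimiter before measuring, so a row consisting only of semicolons also resets the parser to "start". A worked example of the test, assuming D.CSV_DELIMITER stays ";":

    l = ";;;;"
    # ";;;;".strip().replace(";", "") -> "" and len("") < 1 is True,
    # so the line counts as empty and status falls back to "start"
    print(len(l.strip().replace(";", "")) < 1)  # True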
@@ -141,13 +129,13 @@ def getCsvSpec(msg, filename, type):
             a = fields[3].split(",")
             for arg in a:
                 b = arg.split(":")
                 step[B.ATTR_STEP_ARGS][b[0]] = b[1]
             data[B.DATA_NODE_STEPS].append(step)
             continue
         elif (a[0].lower() == "option"):
             data[a[0]][a[1]] = fields[1]
             continue
-        elif (a[0].lower() in CSV_HEADER_START):
+        elif (a[0].lower() in D.CSV_HEADER_START):
             # create deep structure a_0 ... a_n
             print("tdata 136 CSV_HEADER_START "+str(len(a)))
             h = a
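For orientation, a spec file that exercises these branches might look like the following. This is an invented example: it assumes the elided lines split column A on ":" into the list `a`, and the split calls above imply that a step row carries its arguments in column D as comma-separated key:value pairs:

    # testspec.csv (hypothetical)
    option:description;first testcase;;;
    step:1;comp01;1;action:insert,tool:job
    table:person;id;name;city
    ;1;Alice;Berlin
    ;2;Bob;Paris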
@@ -161,19 +149,19 @@ def getCsvSpec(msg, filename, type):
                     break
                 header.append(f)
             tableDict[B.DATA_NODE_HEADER] = header
-            if type == CSV_SPECTYPE_TREE:
+            if type == D.CSV_SPECTYPE_TREE:
                 tableDict[B.DATA_NODE_DATA] = {}
-            elif type == CSV_SPECTYPE_KEYS:
-                tableDict[CSV_NODETYPE_KEYS] = {}
-            elif type == CSV_SPECTYPE_CONF:
+            elif type == D.CSV_SPECTYPE_KEYS:
+                tableDict[D.CSV_NODETYPE_KEYS] = {}
+            elif type == D.CSV_SPECTYPE_CONF:
                 tableDict = {}
                 headerFields = []
             else:
                 tableDict[B.DATA_NODE_DATA] = []
             setTabContent(msg, data, tableDict, h)
-            status = CSV_SPECTYPE_DATA
+            status = D.CSV_SPECTYPE_DATA
             continue
-        elif (status == CSV_SPECTYPE_DATA):
+        elif (status == D.CSV_SPECTYPE_DATA):
             # check A-col for substructure
             # fill data
             tableDict = getTabContent(msg, data, h)
@@ -182,20 +170,20 @@ def getCsvSpec(msg, filename, type):
             # case-differentiation DATA or TREE
             for f in header:
                 row[f] = fields[i]
-                if type == CSV_SPECTYPE_TREE:
+                if type == D.CSV_SPECTYPE_TREE:
                     tableDict[B.DATA_NODE_DATA][f] = fields[i]
                 i += 1
-            if type == CSV_SPECTYPE_DATA:
+            if type == D.CSV_SPECTYPE_DATA:
                 tableDict[B.DATA_NODE_DATA].append(row)
-            elif type == CSV_SPECTYPE_KEYS:
-                tableDict[CSV_NODETYPE_KEYS][fields[1]] = row
-            elif type == CSV_SPECTYPE_CONF:
+            elif type == D.CSV_SPECTYPE_KEYS:
+                tableDict[D.CSV_NODETYPE_KEYS][fields[1]] = row
+            elif type == D.CSV_SPECTYPE_CONF:
                 tableDict[fields[1]] = row
                 headerFields.append(fields[1])
             setTabContent(msg, data, tableDict, h)
-    if (status in [CSV_SPECTYPE_DATA, CSV_SPECTYPE_KEYS]):
+    if (status in [D.CSV_SPECTYPE_DATA, D.CSV_SPECTYPE_KEYS]):
         tableDict = getTabContent(msg, data, h)
-        if type == CSV_SPECTYPE_DATA:
+        if type == D.CSV_SPECTYPE_DATA:
             tableDict[B.DATA_NODE_HEADER] = headerFields
         setTabContent(msg, data, tableDict, h)
     print("return getCsvSpec "+str(data))
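Taken together, the branches above give getCsvSpec() a differently shaped result per spec type. A sketch of the shapes, using the constant names symbolically, since the string values behind B.DATA_NODE_HEADER and B.DATA_NODE_DATA live in basic.constants and are not part of this diff:

    import basic.constants as B
    import utils.data_const as D

    # hypothetical single-row results, one per spec type
    data_result = {B.DATA_NODE_HEADER: ["id", "name"],
                   B.DATA_NODE_DATA: [{"id": "1", "name": "Alice"}]}  # list of rows
    tree_result = {B.DATA_NODE_HEADER: ["id", "name"],
                   B.DATA_NODE_DATA: {"id": "1", "name": "Alice"}}    # one flat dict
    keys_result = {B.DATA_NODE_HEADER: ["id", "name"],
                   D.CSV_NODETYPE_KEYS: {"1": {"id": "1", "name": "Alice"}}}  # keyed by column B
    conf_result = {"1": {"id": "1", "name": "Alice"}}  # keyed rows; headerFields collects the keys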
@@ -231,7 +219,7 @@ def getTabContent(msg, data, path):


 def readCsv(msg, filename, comp):
     job = basic.program.Job.getInstance()
-    verify = -1+job.getDebugLevel("tdata_tool")
+    verify = -1+job.getDebugLevel(TOOL_NAME)
     job.debug(verify, "readCsv " + filename)
     fields = []
     nodes = []
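This hunk and the remaining ones all make the same substitution: the repeated "tdata_tool" literal becomes the module-level TOOL_NAME constant introduced in the first hunk. A minimal sketch of the pattern, assuming getDebugLevel() looks the tool name up in the job configuration:

    import basic.program

    TOOL_NAME = "tdata_tool"
    """ name of the tool in order to switch debug-info on """

    def readCsv(msg, filename, comp):
        job = basic.program.Job.getInstance()
        # one constant instead of a string literal per function; a typo
        # in a repeated literal would silently disable debug output for
        # that one function only
        verify = -1 + job.getDebugLevel(TOOL_NAME)
        job.debug(verify, "readCsv " + filename)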
@@ -249,7 +237,7 @@ def readCsv(msg, filename, comp):
         job.debug(verify, str(state) + " line " + line + " :" + str(len(fields)) + ": " + str(fields))
         if len(testline) < 2 and state < 1:
             state = 0
-        elif fields[0].lower() in CSV_HEADER_START:
+        elif fields[0].lower() in D.CSV_HEADER_START:
             state = 2
             columns = []
             cnt = len(fields)
@@ -304,14 +292,14 @@ def setSubnode(i, nodes, data, tree):
 def getDataStructure(comp):
     # gets data-structure from the vml in the component-folder
     job = basic.program.Job.getInstance()
-    verify = -1+job.getDebugLevel("tdata_tool")
+    verify = -1+job.getDebugLevel(TOOL_NAME)
     job.debug(verify, "getDataStructure " + comp)


 def normalizeDataRow(dstruct, xpathtupel, row, referencedate):
     # normalize data of the row if necessary
     # raw-value is saved as new field with _raw as suffix
     job = basic.program.Job.getInstance()
-    verify = -1+job.getDebugLevel("tdata_tool")
+    verify = -1+job.getDebugLevel(TOOL_NAME)
     job.debug(verify, "calcDataRow " + row)


 def writeCsvData(filename, tdata, comp):
@@ -323,7 +311,7 @@ def writeCsvData(filename, tdata, comp):
     :return:
     """
     job = basic.program.Job.getInstance()
-    verify = -1+job.getDebugLevel("tdata_tool")
+    verify = -1+job.getDebugLevel(TOOL_NAME)
     job.debug(verify, "writeDataTable " + str(comp))
     text = "table"
     for f in tdata[B.DATA_NODE_HEADER]:
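The loop over the header continues past the end of this hunk; presumably it appends each field behind the "table" prefix, so the first line written by writeCsvData() would read something like the following (a guess based only on the lines shown here):

    # hypothetical first line of the written CSV, for a header
    # ["id", "name", "city"]:
    # text = "table" -> "table;id;name;city"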