
new spectype conf in tdata

master · Ulrich Carmesin · 2 years ago · commit 67cf647b48

2 changed files:
    execute_testcase.py   (26 changed lines)
    utils/tdata_tool.py   (97 changed lines)

execute_testcase.py (26 changed lines)

@@ -12,7 +12,7 @@ import basic.message as message
 # Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
-PROGRAM_NAME = "compare_testcase"
+PROGRAM_NAME = "execute_testcase"
 def startPyJob(job):
     cm = basic.componentHandling.ComponentManager()
@@ -20,19 +20,17 @@ def startPyJob(job):
     cm.initComponents()
     comps = cm.getComponents(PROGRAM_NAME)
     job.m.setMsg("# Components initialized with these relevant components " + str(comps))
-    report = utils.report_tool.Report()
-    testdata = utils.tdata_tool.getTestdata()
-    for c in comps:
-        comp = cm.getComponent(c)
-        comp.m.logInfo("------- "+comp.name+" ----------------------------------------")
-        comp.compare_TcResults(report)
-        comp.m.logInfo("------- "+comp.name+" ----------------------------------------")
-        job.m.merge(comp.m)
-        print(str(comp))
-        comp.conf["function"][PROGRAM_NAME] = comp.m.topmessage
-    text = report.reportTestcase(job.par.testcase)
-    path = os.path.join(job.par.tcdir, "Result.html")
-    utils.file_tool.writeFileText(job.m, path, text)
+    tdata = utils.tdata_tool.getTestdata()
+    if not "_steps" in tdata:
+        raise Exception("no steps to execute in testdata")
+    for (step) in tdata["_steps"]:
+        if step["comp"] in comps:
+            comp = cm.getComponent(step["comp"])
+            comp.execute_testcase(step, tdata)
+            job.m.merge(comp.m)
+        else:
+            job.m.setError(step["comp"]+" kann nicht aufgerufen werden!")
 # Press the green button in the gutter to run the script.
 if __name__ == '__main__':
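
Note: startPyJob no longer builds a compare report; it executes the testcase step by step. A minimal sketch of the testdata shape the new loop consumes; only the "_steps" list and the "comp" key are confirmed by this diff, the remaining step fields are hypothetical:

    tdata = {
        "_steps": [
            {"comp": "testdb", "fct": "insert"},    # "fct" is an assumed field
            {"comp": "testrest", "fct": "call"}
        ]
    }
    # every step whose "comp" names an initialized component is dispatched via
    # comp.execute_testcase(step, tdata); otherwise the job records the error
    # "... kann nicht aufgerufen werden!" ("... cannot be called!")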

utils/tdata_tool.py (97 changed lines)

@@ -35,6 +35,8 @@ CSV_DELIMITER = ";"
 CSV_SPECTYPE_DATA = "data"
 CSV_SPECTYPE_TREE = "tree"
 CSV_SPECTYPE_KEYS = "keys"
+CSV_SPECTYPE_CONF = "conf"
+CSV_NODETYPE_KEYS = "_keys"
 ATTR_SRC_TYPE = "tdtyp"
 ATTR_SRC_DATA = "tdsrc"
@@ -84,6 +86,9 @@ def getTestdata():
         data = getCsvSpec(job.m, filename, CSV_SPECTYPE_DATA)
         for k in data:
             tdata[k] = data[k]
+            if (k == "option"):
+                for p in data[k]:
+                    setattr(job.par, p, data[k][p])
     else:
         job.m.setFatal("test-Data: reftyp " + tdata[ATTR_SRC_TYPE] + " is not implemented")
     return tdata
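
Note: the added branch copies an "option" block from the spec file onto the job parameters. Assuming getCsvSpec returned {"option": {"testcase": "TC0001"}} (hypothetical key and value), the inner loop amounts to:

    setattr(job.par, "testcase", "TC0001")    # i.e. job.par.testcase = "TC0001"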
@@ -103,11 +108,14 @@ def getCsvSpec(msg, filename, type):
         a_0 is keyword in CSV_HEADER_START
         a_1 ... a_n is key characterized by header-field like _fk* or _pk*
         a_0 : { .. a_n : { _keys : [ _fpk*.. ] , _header : [ fields.. ], _data : { pk_0 : { ... pk_n : { field : value }
+    d) conf:
+        _header : [ field_0, ... ]
+        { field_0 : { attr_0 : val_0, .. }, field_1 : { ... }, ... }
     """
     data = {}
     header = []
-    lines = utils.file_tool.readFileLines(msg, filename)
+    h = [] # from a[]
+    lines = utils.file_tool.readFileLines(filename, msg)
     status = "start"
     tableDict = {}
     for l in lines:
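
Note: case d) documents the new "conf" spectype. Per this docstring, a conf spec parses into a mapping of field names to their attribute rows, for example (hypothetical field and attribute names):

    {
        "_header": ["id", "name"],
        "id":   {"type": "int", "length": "11"},
        "name": {"type": "varchar", "length": "40"}
    }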
@@ -119,7 +127,7 @@ def getCsvSpec(msg, filename, type):
             continue
         if (fields[0][0:1] == "#"):
             continue
-        a = fields[0].split(":")
+        a = fields[0].lower().split(":")
         # keywords option, step, table
         if (a[0] not in data):
             data[a[0]] = {}
@@ -142,48 +150,58 @@ def getCsvSpec(msg, filename, type):
         elif (a[0].lower() in CSV_HEADER_START):
             # create deep structure a_0 ... a_n
             print("tdata 136 CSV_HEADER_START "+str(len(a)))
-            if (status == CSV_SPECTYPE_DATA):
-                setSpecDict(msg, h, data, tableDict)
-            #CSV_SPECTYPE_DATA
-            #CSV_SPECTYPE_TREE
-            #CSV_SPECTYPE_KEYS
-            tableDict = {}
-            tableDict[B.DATA_NODE_HEADER] = []
-            if type == CSV_SPECTYPE_DATA:
-                tableDict[B.DATA_NODE_DATA] = []
-            else:
-                tableDict[B.DATA_NODE_DATA] = []
+            #CSV_SPECTYPE_DATA
+            #CSV_SPECTYPE_TREE
+            #CSV_SPECTYPE_KEYS
+            # save header in separate structure
+            h = a
+            tableDict = getTabContent(msg, data, h)
             i = 0
             for f in fields:
                 i += 1
-                if i <= 1: continue
+                if i <= 1:
+                    continue
                 if len(f) < 1:
                     break
                 header.append(f)
+            tableDict[B.DATA_NODE_HEADER] = header
+            if type == CSV_SPECTYPE_TREE:
+                tableDict[B.DATA_NODE_DATA] = {}
+            elif type == CSV_SPECTYPE_KEYS:
+                tableDict[CSV_NODETYPE_KEYS] = {}
+            elif type == CSV_SPECTYPE_CONF:
+                tableDict = {}
+                headerFields = []
+            else:
+                tableDict[B.DATA_NODE_DATA] = []
+            setTabContent(msg, data, tableDict, h)
             status = CSV_SPECTYPE_DATA
-            h = a
             continue
         elif (status == CSV_SPECTYPE_DATA):
             # check A-col for substructure
             # fill data
+            tableDict = getTabContent(msg, data, h)
             row = {}
             i = 1
             # case-differentiation DATA or TREE
             for f in header:
                 row[f] = fields[i]
                 i += 1
-                if type == CSV_SPECTYPE_TREE:
-                    tableDict[B.DATA_NODE_DATA][f] = fields[i]
-                    i += 1
             if type == CSV_SPECTYPE_DATA:
                 tableDict[B.DATA_NODE_DATA].append(row)
-    if (status == CSV_SPECTYPE_DATA):
-        setSpecDict(msg, h, data, tableDict)
+            elif type == CSV_SPECTYPE_KEYS:
+                tableDict[CSV_NODETYPE_KEYS][fields[1]] = row
+            elif type == CSV_SPECTYPE_CONF:
+                tableDict[fields[1]] = row
+                headerFields.append(fields[1])
+            setTabContent(msg, data, tableDict, h)
+    if (status in [CSV_SPECTYPE_DATA, CSV_SPECTYPE_KEYS]):
+        tableDict = getTabContent(msg, data, h)
+        if type == CSV_SPECTYPE_DATA:
+            tableDict[B.DATA_NODE_HEADER] = headerFields
+        setTabContent(msg, data, tableDict, h)
     print("return getCsvSpec "+str(data))
     return data

-def setSpecDict(msg, path, data, tabledata):
+def setTabContent(msg, data, tabledata, path):
     if len(path) >= 2 and path[1] not in data[path[0]]:
         data[path[0]][path[1]] = {}
     if len(path) >= 3 and path[2] not in data[path[0]][path[1]]:
@@ -196,7 +214,20 @@ def setSpecDict(msg, path, data, tabledata):
         data[path[0]][path[1]][path[2]] = tabledata
     elif len(path) == 4:
         data[path[0]][path[1]][path[2]][path[3]] = tabledata
     pass

+def getTabContent(msg, data, path):
+    if len(path) >= 2 and path[1] not in data[path[0]]:
+        data[path[0]][path[1]] = {}
+    if len(path) >= 3 and path[2] not in data[path[0]][path[1]]:
+        data[path[0]][path[1]][path[2]] = {}
+    if len(path) >= 4 and path[3] not in data[path[0]][path[1]][path[2]]:
+        data[path[0]][path[1]][path[2]][path[3]] = {}
+    if len(path) == 2:
+        return data[path[0]][path[1]]
+    elif len(path) == 3:
+        return data[path[0]][path[1]][path[2]]
+    elif len(path) == 4:
+        return data[path[0]][path[1]][path[2]][path[3]]

 def readCsv(msg, filename, comp):
     job = basic.program.Job.getInstance()
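
Note: setTabContent and getTabContent are symmetric helpers that write and read the table dictionary at a key path of two to four segments, creating missing intermediate nodes on the way. A short sketch (hypothetical path keys; the msg argument is unused in the bodies shown above):

    data = {"tdata": {}}                   # the top-level key must already exist
    path = ["tdata", "testa", "database"]  # e.g. from a CSV key a_0:a_1:a_2
    table = {"_header": ["id", "name"]}
    setTabContent(None, data, table, path)
    assert getTabContent(None, data, path) is table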
@@ -209,7 +240,8 @@ def readCsv(msg, filename, comp):
     state = 0
     data = []
     cnt = 0
-    lines = utils.file_tool.readFileLines(msg, filename)
+    lines = utils.file_tool.readFileLines(filename, msg)
+    basename = os.path.basename(filename)[0:-4]
     startCols = 1
     for line in lines:
         fields = line.split(';')
@@ -234,13 +266,16 @@ def readCsv(msg, filename, comp):
                 cnt = j
             job.debug(verify, str(state) + " " + str(cnt) + " cols " + str(columns))
         elif state >= 2 and len(testline) > 2:
-            if state == 2:
-                nodes = fields[0].split(":")
+            if state == 2 and not fields[0].isspace():
+                struct = fields[0].split(":")
+                for x in struct:
+                    if len(x) > 2:
+                        nodes.append(x)
                 job.debug(verify, str(state) + " nodes " + str(nodes))
             state = 3
             row = {}
             for i in range(startCols, cnt-1):
-                row[columns[i-2]] = fields[i]
+                row[columns[i-startCols]] = fields[i]
             job.debug(verify, str(state) + " row " + str(row))
             data.append(row)
         elif state == 3:
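
Note: the node path is now read from the A-column of the first data row only when that cell is non-blank, and segments of length 2 or less are dropped. For example (hypothetical value):

    fields[0] = "person:address"
    # after the filter (len(x) > 2): nodes == ["person", "address"]
    # an A-column of "db:person" would keep only "person"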
@@ -248,6 +283,8 @@ def readCsv(msg, filename, comp):
             output = setSubnode(0, nodes, data, output)
             data = []
             state = 0
+    if len(nodes) < 1:
+        nodes.append(basename)
     output = setSubnode(0, nodes, data, output)
     return output
@@ -255,7 +292,7 @@ def setSubnode(i, nodes, data, tree):
     print("setSubnode " + str(i) + ": " + ": " + str(tree))
     if i >= len(nodes):
         print("setSubnode a " + str(i))
-        tree["data"] = data
+        tree["_data"] = data
     elif tree is not None and nodes[i] in tree.keys():
         print("setSubnode b " + str(i))
         tree[nodes[i]] = setSubnode(i+1, nodes, data, tree[nodes[i]])
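
Note: two changes interact here: a CSV that declares no node path now falls back to the file's basename, and the collected rows are stored under "_data" instead of "data". Assuming a file t_person.csv with two plain data rows (hypothetical name and values), readCsv would return roughly:

    {"t_person": {"_data": [{"id": "1", "name": "Alice"},
                            {"id": "2", "name": "Bob"}]}}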
@@ -277,7 +314,7 @@ def normalizeDataRow(dstruct, xpathtupel, row, referencedate):
     verify = -1+job.getDebugLevel("tdata_tool")
     job.debug(verify, "calcDataRow " + row)

-def writeDataTable(filename, tdata, comp):
+def writeCsvData(filename, tdata, comp):
     """
     writes the testdata into a csv-file for documentation of the test-run
     :param teststatus:
