|
# Author : Ulrich Carmesin
# Source : gitea.ucarmesin.de
# ---------------------------------------------------------------------------------------------------------
""" |
|
|
|
|
|
the issue of this tool is to transform extern data to the internal structure and the internal structure into extern data - i.e. mostly test-results. |
|
|
|
|
|
* * * * * * * * |
|
|
|
|
|
the testdata have several elements |
|
|
|
|
|
* parameter (-td --tdata) : to identify which testdata should be loaded |
|
|
|
|
|
* source (flaskdb: dbname / dir: filename) : always structured in a table (easy to specify) with columns |
|
|
|
|
|
* node : where the rows are |
|
|
|
|
|
* action : what should be done - default insert |
|
|
|
|
|
+ fields : dates in relation of a reference<day or a formula |
|
|
|
|
|
* interface : configured in components and used in comparison with attributes to each field: |
|
|
|
|
|
* ignored - if it should be ignored on differences, it is necessary on technical ID-fields |
|
|
|
|
|
* id-field - necessary |
|
|
|
|
|
* * * * * * * * |
|
|
|
|
|
the testdata itself which are written in different artifacts of modern applications are mostly stored as tree |
|
|
|
|
|
- so as xml, json, always with plain data in the leaf. So the intern structure should be also a tree - in python: dictionary. |
|
|
|
|
|
""" |
|
|
|
|
|
import os.path
import basic.program
import utils.config_tool
import utils.file_tool
import basic.constants as B
import utils.data_const as D
import utils.path_const as P
import utils.path_tool
import utils.date_tool
import basic.step
import utils.i18n_tool
import re
|
|
|
TOOL_NAME = "tdata_tool"
""" name of the tool in order to switch debug-info on """
list_blocks = {}  # lists of aliases
|
def getTestdata(job=None):
    """
    get the testdata from one of the possible sources
    for the testcase resp. testsuite of the job
    :return:
    """
    if job is None:
        job = basic.program.Job.getInstance()
    if "testcase" in job.program:
        return collectTestdata(B.PAR_TESTCASE, job.par[B.PAR_TESTCASE], job)
    else:
        return collectTestdata(B.PAR_TESTSUITE, job.par[B.PAR_TESTSUITE], job)
|
def collectTestdata(gran, testentity, job):
    """
    collects the testdata from the possible kinds of sources
    * dir: each file in the specific test archive
    * csv: specific file
    * db: specific db with a testcase-catalogue
    :return:
    """
    setBlockLists(job)
    if gran == B.PAR_TESTCASE:
        basispath = utils.path_tool.rejoinPath(job.conf.confs[B.SUBJECT_PATH][B.ATTR_PATH_TDATA], testentity)
        pathname = utils.config_tool.getConfigPath(P.KEY_TESTCASE, job.par[B.PAR_TESTCASE], "", job)
    if gran == B.PAR_TESTSUITE:
        basispath = utils.path_tool.rejoinPath(job.conf.confs[B.SUBJECT_PATH][B.ATTR_PATH_TDATA], testentity)
        pathname = utils.config_tool.getConfigPath(P.KEY_TESTSUITE, job.par[B.PAR_TESTSUITE], "", job)
    if pathname[-3:] == D.DFILE_TYPE_CSV:
        tdata = getCsvSpec(job.m, pathname, D.CSV_SPECTYPE_DATA)
    else:
        tdata = utils.file_tool.readFileDict(pathname, job.m)
    # get explicit specdata of includes
    for pathname in tdata[D.CSV_BLOCK_IMPORT]:
        pathname = utils.path_tool.rejoinPath(pathname)
        if job.conf.confs[B.SUBJECT_PATH][B.ATTR_PATH_TDATA] not in pathname:
            pathname = utils.path_tool.rejoinPath(basispath, pathname)
        if pathname[-3:] == D.DFILE_TYPE_CSV:
            data = getCsvSpec(job.m, pathname, D.CSV_SPECTYPE_DATA)
        else:
            data = utils.file_tool.readFileDict(pathname, job.m)
        for table in data[D.CSV_BLOCK_TABLES]:
            if table in tdata[D.CSV_BLOCK_TABLES]:
                print("Error: table " + table + " is defined twice")
            tdata[D.CSV_BLOCK_TABLES][table] = data[D.CSV_BLOCK_TABLES][table]
    # get implicit specdata of the spec-library
    for prefix in list_blocks[D.DFILE_TABLE_PREFIX]:
        files = utils.file_tool.getFiles(job.m, basispath, prefix, None)
        if len(files) < 1:
            continue
        for f in files:
            if f in tdata[D.CSV_BLOCK_TABLES]:
                continue
            pathname = utils.path_tool.rejoinPath(basispath, f)
            if pathname[-3:] == D.DFILE_TYPE_CSV:
                data = getCsvSpec(job.m, pathname, D.CSV_SPECTYPE_DATA)
            else:
                data = utils.file_tool.readFileDict(pathname, job.m)
            for table in data[D.CSV_BLOCK_TABLES]:
                if table in tdata[D.CSV_BLOCK_TABLES]:
                    print("Error: table " + table + " is defined twice")
                tdata[D.CSV_BLOCK_TABLES][table] = data[D.CSV_BLOCK_TABLES][table]
    # fill the options into the job-parameter
    for p in tdata[D.CSV_BLOCK_OPTION]:
        setattr(job.par, p, tdata[D.CSV_BLOCK_OPTION][p])
    return tdata
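# Example (assumed layout, for illustration only): a testcase specification file may contain an
# include line such as
#   _import;table_person.csv
# collectTestdata() then reads that file relative to the testdata path and merges its tables into
# tdata[D.CSV_BLOCK_TABLES]; the exact keyword for the import block is resolved at runtime via the
# alias lists built in setBlockLists().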
|
|
|
|
|
|
|
|
def setBlockLists(job):
    for block in D.LIST_BLOCK_CONST + D.LIST_ATTR_CONST + D.LIST_DFNAME_CONST:
        aliases = utils.i18n_tool.I18n.getInstance().getAliasList(block+"='"+eval("D."+block)+"'")
        #aliases.append(eval("D."+block))
        list_blocks[eval("D." + block)] = []
        for x in aliases:
            list_blocks[eval("D." + block)].append(x.lower())
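# Sketch of the resulting lookup structure (assumption - the concrete aliases depend on the
# i18n configuration):
#   list_blocks = {"option": ["option", "optionen"], "step": ["step", "schritt"], ...}
# i.e. for every block/attribute constant the list of lower-case aliases that may appear in
# column A of a specification file.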
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def readCsv(msg, filename, comp, aliasNode="", job=None):
    if job is None:
        job = basic.program.Job.getInstance()
    lines = utils.file_tool.readFileLines(filename, msg)
    print("readCsv "+filename)
    return parseCsv(msg, filename, lines, comp, aliasNode, job)
|
|
def parseCsv(msg, filename, lines, comp, aliasNode="", job=None):
    if job is None:
        job = basic.program.Job.getInstance()
    if len(list_blocks) < 1:
        setBlockLists(job)
    tdata = {}
    if len(aliasNode) < 1:
        print(str(list_blocks))
        aliasNode = extractAliasNode(filename, comp, job)
    if len(aliasNode) > 3:
        tdata[D.DATA_ATTR_ALIAS] = aliasNode
    return parseCsvSpec(msg, lines, D.CSV_SPECTYPE_DATA, tdata, job)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def extractAliasNode(filename, comp, job):
    basename = os.path.basename(filename)[0:-4]
    for prefix in list_blocks[D.DFILE_TABLE_PREFIX]:
        if basename.find(prefix) == 0:
            basename = basename[len(prefix):]
    if comp is None:
        return ""
    if B.TOPIC_NODE_DB in comp.conf[B.SUBJECT_ARTS] and basename in comp.conf[B.DATA_NODE_DDL]:
        return B.DATA_NODE_TABLES+":"+basename
    return ""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def getCsvSpec(msg, filename, ttype, job=None):
    """
    reads the specification from a csv-file and maps it into the internal data-structure
    a = field[0] delimited by :
    a) data : like a table with a data-array of key-value-pairs
       a_0 is a keyword [option, step, CSV_HEADER_START ]
       a_0 : { a_1 : { f_1 : v_1, .... }          # option, step
       a_0 : { .. a_n : { _header : [ .. ], _data : [ rows... ]   # table, node
    b) tree : as a tree - the rows must be uniquely identified by the first column
       a_0 is a keyword in CSV_HEADER_START
       a_0 : { .. a_n : { _header : [ fields.. ], _data : { field : value }
    c) keys : as a tree - the rows must be uniquely identified by the first column
       a_0 is a keyword in CSV_HEADER_START
       a_1 ... a_n is a key characterized by a header-field like _fk* or _pk*
       a_0 : { .. a_n : { _keys : [ _fpk*.. ] , _header : [ fields.. ], _data : { pk_0 : { ... pk_n : { field : value }
    d) conf :
       _header : [ field_0, ... ]
       { field_0 : { attr_0 : val_0, .. }, field_1 : { ... }, ... }
    :param msg:
    :param filename:
    :param ttype:
    :param job:
    :return:
    """
    if job is None:
        job = basic.program.Job.getInstance()
    lines = utils.file_tool.readFileLines(filename, msg)
    tdata = {}  # the result
    return parseCsvSpec(msg, lines, ttype, tdata, job)
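# Minimal illustration (assumed keywords and node names, resolved via the alias lists at runtime):
#   table:person;id;name
#   testa:person;1;Alice
# parsed with ttype = D.CSV_SPECTYPE_DATA gives roughly
#   {"_tables": {"person": {"_header": ["id", "name"],
#                           "_data": [{"id": "1", "name": "Alice", "_comp": {"testa": "person"}}]}}}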
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def parseCsvSpec(msg, lines, ttype, tdata, job=None):
    """
    parses the csv-lines and fills the internal data-structure
    :param msg:
    :param lines:
    :param ttype:
    :param tdata:
    :param job:
    :return:
    """
    if job is None:
        job = basic.program.Job.getInstance()
    if len(list_blocks) < 1:
        setBlockLists(job)
    status = "start"
    tableAttr = {}  # table
    tableDict = {}  # table
    for l in lines:
        print("lines "+l)
        fields = splitFields(l, D.CSV_DELIMITER, job)
        # check empty line, comment
        if (len(fields) < 1) or (len(l.strip().replace(D.CSV_DELIMITER,"")) < 1):
            status = "start"
            continue
        if (fields[0][0:1] == "#"):
            continue
        a = fields[0].lower().split(":")
        # keywords option, step, table
        print(str(a)+" -- "+str(fields))
        tableAttr = setTableAttribute(tableAttr, a[0], fields[1], job)
        if (tableAttr["hit"]):
            status = "TABLE_ALIAS"
            continue
        if (a[0].lower() in list_blocks[D.CSV_BLOCK_HEAD]):
            print("head "+l)
            setTdataLine(tdata, fields, D.CSV_BLOCK_HEAD, job)
            status = "start"
            continue
        elif (a[0].lower() in list_blocks[D.CSV_BLOCK_OPTION]):
            print("option " + l)
            setTdataLine(tdata, fields, D.CSV_BLOCK_OPTION, job)
            status = "start"
            continue
        elif (a[0].lower() in list_blocks[D.CSV_BLOCK_STEP]):
            print("step "+l)
            step = basic.step.parseStep(job, fields)
            if D.CSV_BLOCK_STEP not in tdata:
                tdata[D.CSV_BLOCK_STEP] = []
            tdata[D.CSV_BLOCK_STEP].append(step)
            status = "start"
            continue
        elif (a[0].lower() in list_blocks[D.CSV_BLOCK_IMPORT]):
            print("includes " + l)
            if D.CSV_BLOCK_IMPORT not in tdata:
                tdata[D.CSV_BLOCK_IMPORT] = []
            tdata[D.CSV_BLOCK_IMPORT].append(fields[1])
            status = "start"
            continue
        elif (a[0].lower() in list_blocks[D.CSV_BLOCK_TABLES]):
            print("tables "+l)
            h = a
            h[0] = B.DATA_NODE_TABLES
            if ttype == D.CSV_SPECTYPE_CONF:
                del h[0]
            tableDict = getTdataContent(msg, tdata, h)
            setTableHeader(tableDict, tableAttr, fields, ttype, job)
            status = D.CSV_SPECTYPE_DATA
        elif (status == D.CSV_SPECTYPE_DATA):
            tableDict = getTdataContent(msg, tdata, h)
            print("setTableData "+str(h)+" "+str(tableDict))
            setTableData(tableDict, fields, ttype, job)
        elif (status == "TABLE_ALIAS") and D.DATA_ATTR_ALIAS in tdata:
            alias = tdata[D.DATA_ATTR_ALIAS]
            b = alias.split(":")
            h = [B.DATA_NODE_TABLES] + b
            tableDict = getTdataContent(msg, tdata, h)
            tableDict[D.DATA_ATTR_ALIAS] = alias
            fields = [alias] + fields
            setTableHeader(tableDict, tableAttr, fields, ttype, job)
            status = D.CSV_SPECTYPE_DATA
    if ttype == D.CSV_SPECTYPE_CONF:
        for k in tdata:
            if B.DATA_NODE_DATA in tdata[k]:
                tdata[k].pop(B.DATA_NODE_DATA)
    if B.DATA_NODE_TABLES in tdata and B.DATA_NODE_TABLES in tdata[B.DATA_NODE_TABLES]:
        for k in tdata[B.DATA_NODE_TABLES][B.DATA_NODE_TABLES]:
            if k in tdata[B.DATA_NODE_TABLES]:
                print("Error: table " + k + " is defined twice")
            else:
                tdata[B.DATA_NODE_TABLES][k] = tdata[B.DATA_NODE_TABLES][B.DATA_NODE_TABLES][k]
        tdata[B.DATA_NODE_TABLES].pop(B.DATA_NODE_TABLES)
    return tdata
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def setTableHeader(tableDict, tableAttr, fields, ttype, job):
    header = []
    for i in range(1, len(fields)):
        header.append(fields[i])
    tableDict[B.DATA_NODE_HEADER] = header
    for attr in tableAttr:
        tableDict[attr] = tableAttr[attr]
    # prepare the sub-structure for row-data
    if ttype == D.CSV_SPECTYPE_TREE:
        tableDict[B.DATA_NODE_DATA] = {}
    elif ttype == D.CSV_SPECTYPE_KEYS:
        tableDict[D.CSV_NODETYPE_KEYS] = {}
    else:
        tableDict[B.DATA_NODE_DATA] = []
    return tableDict
|
|
def setTableData(tableDict, fields, ttype, job):
    row = {}
    if ttype == D.CSV_SPECTYPE_DATA and ":" not in fields[0] and D.DATA_ATTR_ALIAS in tableDict:
        fields = [tableDict[D.DATA_ATTR_ALIAS]] + fields
    i = 1
    for f in tableDict[B.DATA_NODE_HEADER]:
        row[f] = fields[i]
        i += 1
    if ttype == D.CSV_SPECTYPE_DATA:
        row[B.ATTR_DATA_COMP] = {}
        for c in fields[0].split(","):
            a = c.split(":")
            row[B.ATTR_DATA_COMP][a[0]] = a[1]
        tableDict[B.DATA_NODE_DATA].append(row)
    elif ttype == D.CSV_SPECTYPE_KEYS:
        tableDict[D.CSV_NODETYPE_KEYS][fields[1]] = row
    elif ttype == D.CSV_SPECTYPE_CONF:
        tableDict[fields[1]] = row
    return tableDict
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def setTableAttribute(tableAttr, key, val, job):
    for attr in D.LIST_DATA_ATTR:
        if (key.lower() in list_blocks[attr]):
            tableAttr[attr] = val
            tableAttr["hit"] = True
            return tableAttr
    tableAttr["hit"] = False
    return tableAttr
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def setTdataLine(tdata, fields, block, job):
    """
    sets field(s) into tdata as a key-value-pair
    additional fields will be concatenated into an internally separated list
    :param tdata:
    :param fields:
    :param block:
    :param job:
    :return:
    """
    a = fields[0].lower().split(":")
    a[0] = block  # normalized key
    val = ""
    for i in range(1, len(fields)-1):
        val += D.INTERNAL_DELIMITER+fields[i]
    if len(val) > len(D.INTERNAL_DELIMITER):
        val = val[len(D.INTERNAL_DELIMITER):]
    setTdataContent(job.m, tdata, val, a)
    return tdata
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def setTdataContent(msg, data, tabledata, path):
    setTdataStructure(msg, data, path)
    if len(path) == 2:
        data[path[0]][path[1]] = tabledata
    elif len(path) == 3:
        data[path[0]][path[1]][path[2]] = tabledata
    elif len(path) == 4:
        data[path[0]][path[1]][path[2]][path[3]] = tabledata


def getTdataContent(msg, data, path):
    setTdataStructure(msg, data, path)
    if len(path) == 2:
        return data[path[0]][path[1]]
    elif len(path) == 3:
        return data[path[0]][path[1]][path[2]]
    elif len(path) == 4:
        return data[path[0]][path[1]][path[2]][path[3]]
    elif len(path) == 1:
        return data[path[0]]
    else:
        return None
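# Usage sketch (illustrative path): getTdataContent(msg, tdata, [B.DATA_NODE_TABLES, "person"])
# first creates the missing nodes via setTdataStructure() and then returns the inner dictionary
# tdata[B.DATA_NODE_TABLES]["person"], which callers fill with header and row data.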
|
|
|
|
|
|
|
|
|
def setTdataStructure(msg, data, path):
    if len(path) >= 1 and path[0] not in data:
        data[path[0]] = {}
    if len(path) >= 2 and path[1] not in data[path[0]]:
        data[path[0]][path[1]] = {}
    if len(path) >= 3 and path[2] not in data[path[0]][path[1]]:
        data[path[0]][path[1]][path[2]] = {}
    if len(path) >= 4 and path[3] not in data[path[0]][path[1]][path[2]]:
        data[path[0]][path[1]][path[2]][path[3]] = {}
    return data
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def splitFields(line, delimiter, job):
    out = []
    fields = line.split(delimiter)
    for i in range(0, len(fields)):
        if fields[i][0:1] == "#":
            break
        if re.match(r"^\"(.*)\"$", fields[i]):
            fields[i] = fields[i][1:-1]
        out.append(fields[i])
    return out
|
|
|
|
|
|
|
|
|
|
|
def getDataStructure(comp):
    # gets data-structure from the vml in the component-folder
    job = basic.program.Job.getInstance()
    verify = -1+job.getDebugLevel(TOOL_NAME)
    job.debug(verify, "getDataStructure " + comp)


def normalizeDataRow(dstruct, xpathtupel, row, referencedate):
    # normalize data of the row if necessary
    # raw-value is saved as new field with _raw as suffix
    job = basic.program.Job.getInstance()
    verify = -1+job.getDebugLevel(TOOL_NAME)
    job.debug(verify, "calcDataRow " + row)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def buildCsvData(tdata, table, job=None):
    """
    writes the testdata into a csv-text for documentation of the test-run
    :param tdata:
    :param table:
    :param job:
    :return:
    """
    text = ""
    for k in [D.DATA_ATTR_DATE, D.DATA_ATTR_COUNT]:
        if k in tdata:
            text += k+";"+str(tdata[k])+"\n"
    header = utils.i18n_tool.I18n.getInstance().getText(f"{B.DATA_NODE_TABLES=}", job)+":"+table
    for f in tdata[B.DATA_NODE_HEADER]:
        header += ";"+f
    text += header + "\n"
    i = 0
    for r in tdata[B.DATA_NODE_DATA]:
        row = ""
        i += 1
        for f in tdata[B.DATA_NODE_HEADER]:
            if f in r:
                row += ";"+str(r[f])
            else:
                row += ";"
        text += row
        text += "\n"
    return text
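# Illustration (assumed node names): for
#   tdata = {"_header": ["id", "name"], "_data": [{"id": "1", "name": "Alice"}]}
# and table = "person", buildCsvData returns text of the form
#   <tables-keyword>:person;id;name
#   ;1;Alice
# where <tables-keyword> is the translated text for B.DATA_NODE_TABLES from the i18n-tool.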
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def writeCsvData(filename, tdata, comp):
    text = ""
    if B.DATA_NODE_TABLES in tdata:
        for k in tdata[B.DATA_NODE_TABLES]:
            # call adapted to the changed buildCsvData(tdata, table, job) signature
            text += buildCsvData(tdata[B.DATA_NODE_TABLES][k], k)
            text += "\n"
    utils.file_tool.writeFileText(comp.m, filename, text)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def isCompTableFile(comp, filename):
    """ check if the filename belongs to the component """
    basetable = os.path.basename(filename)[0:-4]
    if comp is None:
        return False
    if B.TOPIC_NODE_DB in comp.conf[B.SUBJECT_ARTS] and basetable in comp.conf[B.DATA_NODE_DDL] \
            and comp.name in filename:
        return True
    return False
|
|
|