|
|
@ -34,6 +34,7 @@ CSV_DELIMITER = ";" |
|
|
|
|
|
|
|
CSV_SPECTYPE_DATA = "data" |
|
|
|
CSV_SPECTYPE_TREE = "tree" |
|
|
|
CSV_SPECTYPE_KEYS = "keys" |
|
|
|
|
|
|
|
ATTR_SRC_TYPE = "tdtyp" |
|
|
|
ATTR_SRC_DATA = "tdsrc" |
|
|
@ -43,10 +44,15 @@ def getTdataAttr(): |
|
|
|
job = basic.program.Job.getInstance() |
|
|
|
out = {} # |
|
|
|
out[ATTR_SRC_TYPE] = DATA_SRC_DIR |
|
|
|
print("---getTdataAttr") |
|
|
|
print(vars(job.par)) |
|
|
|
if hasattr(job.par, B.PAR_TESTCASE): |
|
|
|
out[ATTR_SRC_NAME] = getattr(job.par, B.PAR_TESTCASE) |
|
|
|
elif hasattr(job.par, B.PAR_TESTSUITE): |
|
|
|
out[ATTR_SRC_NAME] = getattr(job.par, B.PAR_TESTSUITE) |
|
|
|
for p in [ATTR_SRC_TYPE, ATTR_SRC_DATA, ATTR_SRC_NAME]: |
|
|
|
out[p] = "" |
|
|
|
if getattr(job.par, p): |
|
|
|
# out[p] = "" |
|
|
|
if hasattr(job.par, p): |
|
|
|
out[p] = getattr(job.par, p) |
|
|
|
return out |
|
|
|
|
|
|
@ -60,10 +66,11 @@ def getTestdata(): |
|
|
|
:return: |
|
|
|
""" |
|
|
|
job = basic.program.Job.getInstance() |
|
|
|
reftyp = getattr(job.par, "tdtyp") |
|
|
|
source = getattr(job.par, "tdsrc") |
|
|
|
criteria = getattr(job.par, "tdname") |
|
|
|
#reftyp = getattr(job.par, "tdtyp") |
|
|
|
#source = getattr(job.par, "tdsrc") |
|
|
|
#criteria = getattr(job.par, "tdname") |
|
|
|
tdata = getTdataAttr() # {"reftyp": reftyp, "source": source, "criteria": criteria} |
|
|
|
print(tdata) |
|
|
|
if tdata[ATTR_SRC_TYPE] == "flaskdb": |
|
|
|
# read data-structure with sourcename |
|
|
|
# connect to source |
|
|
@ -84,13 +91,25 @@ def getTestdata(): |
|
|
|
def getCsvSpec(msg, filename, type): |
|
|
|
""" |
|
|
|
get data from a csv-file |
|
|
|
a = field[0] delimited by : |
|
|
|
a) data : like a table with data-array of key-value-pairs |
|
|
|
a_0 is keyword [option, step, CSV_HEADER_START ] |
|
|
|
a_0 : { a_1 : { f_1 : v_1, .... } # option, step |
|
|
|
a_0 : { .. a_n : { _header : [ .. ], _data : [ rows... ] # table, node |
|
|
|
b) tree : as a tree - the rows must be unique identified by the first column |
|
|
|
a_0 is keyword in CSV_HEADER_START |
|
|
|
a_0 : { .. a_n : { _header : [ fields.. ], _data : { field : value } |
|
|
|
c) keys : as a tree - the rows must be unique identified by the first column |
|
|
|
a_0 is keyword in CSV_HEADER_START |
|
|
|
a_1 ... a_n is key characterized by header-field like _fk* or _pk* |
|
|
|
a_0 : { .. a_n : { _keys : [ _fpk*.. ] , _header : [ fields.. ], _data : { pk_0 : { ... pk_n : { field : value } |
|
|
|
""" |
|
|
|
data = {} |
|
|
|
header = [] |
|
|
|
h = [] # from a[] |
|
|
|
lines = utils.file_tool.readFileLines(msg, filename) |
|
|
|
status = "start" |
|
|
|
tableDict = {} |
|
|
|
for l in lines: |
|
|
|
print("lines "+l) |
|
|
|
fields = l.split(CSV_DELIMITER) |
|
|
@ -121,49 +140,64 @@ def getCsvSpec(msg, filename, type): |
|
|
|
data[a[0]][a[1]] = fields[1] |
|
|
|
continue |
|
|
|
elif (a[0].lower() in CSV_HEADER_START): |
|
|
|
# create deep structure a_0 ... a_n |
|
|
|
print("tdata 136 CSV_HEADER_START "+str(len(a))) |
|
|
|
if (status == CSV_SPECTYPE_DATA): |
|
|
|
setSpecDict(msg, h, data, tableDict) |
|
|
|
#CSV_SPECTYPE_DATA |
|
|
|
#CSV_SPECTYPE_TREE |
|
|
|
#CSV_SPECTYPE_KEYS |
|
|
|
tableDict = {} |
|
|
|
tableDict[B.DATA_NODE_HEADER] = [] |
|
|
|
if type == CSV_SPECTYPE_DATA: |
|
|
|
tableDict[B.DATA_NODE_DATA] = [] |
|
|
|
else: |
|
|
|
tableDict[B.DATA_NODE_DATA] = [] |
|
|
|
#CSV_SPECTYPE_DATA |
|
|
|
#CSV_SPECTYPE_TREE |
|
|
|
#CSV_SPECTYPE_KEYS |
|
|
|
# save header in separate structure |
|
|
|
i = 0 |
|
|
|
for f in fields: |
|
|
|
i += 1 |
|
|
|
if i == 1: continue |
|
|
|
if i <= 1: continue |
|
|
|
header.append(f) |
|
|
|
status = CSV_SPECTYPE_DATA |
|
|
|
h = a |
|
|
|
continue |
|
|
|
elif (status == CSV_SPECTYPE_DATA): |
|
|
|
# check A-col for substructure |
|
|
|
if (a[0] not in data): |
|
|
|
data[a[0]] = {} |
|
|
|
if len(a) == 1 and type == CSV_SPECTYPE_DATA: |
|
|
|
data[a[0]][B.DATA_NODE_DATA] = [] |
|
|
|
# its a component |
|
|
|
if len(a) > 1 and a[1] not in data[a[0]]: |
|
|
|
data[a[0]][a[1]] = {} |
|
|
|
if len(a) == 2 and type == CSV_SPECTYPE_DATA: |
|
|
|
data[a[0]][a[1]]["_data"] = [] |
|
|
|
if len(a) > 2 and a[2] not in data[a[0]][a[1]]: |
|
|
|
data[a[0]][a[1]][a[2]] = {} |
|
|
|
if len(a) == 3 and type == CSV_SPECTYPE_DATA: |
|
|
|
data[a[0]][a[1]][a[1]][B.DATA_NODE_DATA] = [] |
|
|
|
# fill data |
|
|
|
row = {} |
|
|
|
i = 1 |
|
|
|
# case-differentiation DATA or TREE |
|
|
|
for f in header: |
|
|
|
row[f] = fields[i] |
|
|
|
i += 1 |
|
|
|
if len(a) == 1 and type == CSV_SPECTYPE_DATA: |
|
|
|
data[a[0]][B.DATA_NODE_DATA].append(row) |
|
|
|
elif len(a) == 1 and type == CSV_SPECTYPE_DATA: |
|
|
|
data[a[0]] = {f: row} |
|
|
|
elif len(a) == 2 and type == CSV_SPECTYPE_DATA: |
|
|
|
data[a[0]][a[1]][B.DATA_NODE_DATA].append(row) |
|
|
|
elif len(a) == 1 and type == CSV_SPECTYPE_DATA: |
|
|
|
data[a[0]][a[1]][B.DATA_NODE_DATA] = {f: row} |
|
|
|
elif len(a) == 3 and type == CSV_SPECTYPE_DATA: |
|
|
|
data[a[0]][a[1]][a[2]] = row |
|
|
|
elif len(a) == 1 and type == CSV_SPECTYPE_DATA: |
|
|
|
data[a[0]][a[1]][a[2]] = {f: row} |
|
|
|
if type == CSV_SPECTYPE_TREE: |
|
|
|
tableDict[B.DATA_NODE_DATA][f] = fields[i] |
|
|
|
if type == CSV_SPECTYPE_DATA: |
|
|
|
tableDict[B.DATA_NODE_DATA].append(row) |
|
|
|
if (status == CSV_SPECTYPE_DATA): |
|
|
|
setSpecDict(msg, h, data, tableDict) |
|
|
|
print("return getCsvSpec "+str(data)) |
|
|
|
return data |
|
|
|
|
|
|
|
def setSpecDict(msg, path, data, tabledata):
    """Attach *tabledata* inside *data* at the nested position named by *path*.

    Walks the key sequence ``path`` (outermost key first) through the
    dict-of-dicts *data*, creating empty intermediate dicts as needed, and
    stores *tabledata* under the last key.

    :param msg: message/logging object (unused; kept for interface compatibility)
    :param path: list of keys describing the nesting, outermost first
    :param data: dict-of-dicts to insert into (mutated in place)
    :param tabledata: table structure (e.g. {_header: [...], _data: [...]}) to
                      store at the leaf
    :return: None (mutates *data*)
    """
    # The original code ignored paths shorter than 2 keys; keep that no-op
    # so any caller relying on it is unaffected.
    if len(path) < 2:
        return
    # setdefault creates each missing intermediate level in one step. This
    # also creates data[path[0]] when absent — the old fixed-depth ladder
    # raised KeyError there while auto-creating every deeper level.
    node = data.setdefault(path[0], {})
    for key in path[1:-1]:
        node = node.setdefault(key, {})
    # Store the payload at the leaf. Unlike the hard-coded depth-2..4 chain,
    # this works for any depth >= 2 instead of silently dropping tabledata
    # for deeper paths.
    node[path[-1]] = tabledata
|
|
|
|
|
|
|
def readCsv(msg, filename, comp): |
|
|
|
job = basic.program.Job.getInstance() |
|
|
|
verify = -1+job.getDebugLevel("tdata_tool") |
|
|
|