|
|
@ -237,33 +237,63 @@ def getTabContent(msg, data, path): |
|
|
|
pass |
|
|
|
|
|
|
|
def readCsv(msg, filename, comp, aliasNode=""):
    """Read a CSV testdata file and delegate parsing.

    Loads the raw lines of *filename* via the file tool, then hands them to
    parseCsv together with the component and the optional alias node path.

    :param msg: message object passed through to the file tool and parser
    :param filename: path of the csv file to read
    :param comp: component the testdata belongs to
    :param aliasNode: optional colon-separated node path overriding the header
    :return: whatever parseCsv produces for this file
    """
    raw_lines = utils.file_tool.readFileLines(filename, msg)
    return parseCsv(msg, filename, raw_lines, comp, aliasNode)
|
|
|
|
|
|
|
|
|
|
|
def parseCsv(msg, filename, lines, comp, aliasNode=""):
    # NOTE(review): this span is a unified-diff fragment with the +/- markers
    # stripped, so BOTH sides of several changed lines appear back to back
    # (duplicate debug-level lines, `data = []` then `data = {}`, two loop
    # headers, two `return` statements, an orphaned empty `elif`). The comments
    # below flag each such pair; confirm against repository history before
    # treating this as one runnable function.
    #
    # Purpose (as far as the visible code shows): a state machine over csv
    # lines that collects table-level attributes (_date/_count), a header line
    # that opens a table, data rows, and short separator lines, building a
    # nested output structure via getTabContent/setTabContent/setSubnode.
    job = basic.program.Job.getInstance()
    verify = -1+job.getDebugLevel(TOOL_NAME)          # diff twin of the line two below
    job.debug(verify, "readCsv " + filename)          # diff twin of the debug call below
    verify = -4+job.getDebugLevel(TOOL_NAME)          # overwrites the value set above
    job.debug(verify, "# # # # # # # # parseCsv " + filename + " :" + comp.name + ": " + str(lines))
    fields = []      # cells of the current csv line
    nodes = []       # node-name path under which the parsed data is stored
    columns = []     # header names of the table currently being read
    output = {}      # final nested result structure
    state = 0        # 0 = outside table, 2 = header read, 3 = reading rows
    data = []        # one diff side: rows collected in a list
    data = {}        # other diff side: tables collected in a dict (shadows the list)
    tableDict = {}   # container of the table currently being filled
    tableDate = ""   # value of a leading _date attribute line, if any
    tableCnt = 0     # value of a leading _count attribute line, if any
    cnt = 0          # column count of the current table
    # NOTE(review): re-reads the file and clobbers the `lines` PARAMETER --
    # the caller readCsv already read the same file. Presumably a leftover;
    # verify which diff side this line belongs to.
    lines = utils.file_tool.readFileLines(filename, msg)
    basename = os.path.basename(filename)[0:-4]       # filename without its 4-char extension (".csv")
    startCols = 1    # index of the first data column; bumped past "_"-prefixed technical columns
    for line in lines:
        fields = line.split(';')
        testline = line.replace(";", "")              # line without separators, to detect (near-)blank lines
        a = fields[0].split(':')                      # first cell split into attribute/node-path parts
        job.debug(verify, str(state) + " line " + line + " :" + str(len(fields)) + ": " + str(fields))
        if len(testline) < 2 and state < 1:
            state = 0
        # NOTE(review): orphaned branch from the other diff side -- its body
        # was removed by the diff, leaving an elif with no statements (this
        # fragment is NOT runnable as-is).
        elif fields[0].lower() in D.CSV_HEADER_START:
        elif a[0].lower() == D.ATTR_TABLE_DATE:
            tableDate = fields[1]
        elif a[0].lower() == D.ATTR_TABLE_CNT:
            tableCnt = fields[1]
        elif a[0].lower() in D.CSV_HEADER_START:
            # header line: open a new table
            state = 2
            columns = []
            h = a                                     # node path taken from the first header cell
            cnt = len(fields)
            job.debug(verify, str(state) + " cnt " + str(cnt))
            data[B.DATA_NODE_TABLES] = {}
            h[0] = B.DATA_NODE_TABLES                 # root the node path at the tables node
            if not aliasNode.isspace() and len(aliasNode) > 3:
                # an explicit alias overrides the node path from the header cell
                struct = aliasNode.split(":")
                for x in struct:
                    if len(x) > 2:
                        nodes.append(x)
                job.debug(verify, str(state) + " nodes " + str(nodes))
            elif len(h) > 1:
                for i in range(1, len(h)):
                    nodes.append(h[i])
                job.debug(verify, str(state) + " nodes " + str(nodes))
            tableDict = getTabContent(msg, data, h)
            if len(tableDate) > 6:
                tableDict[D.ATTR_TABLE_DATE] = tableDate
            if int(tableCnt) > 0:
                tableDict[D.ATTR_TABLE_CNT] = tableCnt
            j = 0
            for i in range(1, cnt):
                # "_"-prefixed leading columns look technical and are skipped
                if fields[0][0:1] == "_":
                    if fields[i][0:1] == "_":
                        startCols += 1
                        continue
                job.debug(verify, str(i) + " cnt " + str(fields[i]))
                # NOTE(review): hunk boundary falls here -- at least one
                # original line between the debug call and the append is not
                # visible in this fragment.
                columns.append(fields[i])
                j = j + 1
            cnt = j                                   # cnt now counts only real (non-technical) columns
            tableDict[B.DATA_NODE_HEADER] = columns
            job.debug(verify, str(state) + " " + str(cnt) + " cols " + str(columns))
        elif state >= 2 and len(testline) > 2:
            # data row after a header
            if state == 2 and not aliasNode.isspace():
                struct = aliasNode.split(":")
                for x in struct:
                    if len(x) > 2:
                        nodes.append(x)
                job.debug(verify, str(state) + " nodes " + str(nodes))
            elif state == 2 and not fields[0].isspace():
                struct = fields[0].split(":")
                for x in struct:
                    if len(x) > 2:
                        nodes.append(x)
                job.debug(verify, str(state) + " nodes " + str(nodes))
            job.debug(verify, str(state) + " " + str(len(testline)))
            tableDict = getTabContent(msg, data, h)
            state = 3
            row = {}
            for i in range(startCols, cnt):            # diff twin of the loop header below
            for i in range(startCols, cnt+startCols):  # other side: shifts range past skipped technical columns
                if i >= len(columns)+startCols:
                    break
                row[columns[i-startCols]] = fields[i]
                job.debug(verify, str(state) + " row " + str(row))
            data.append(row)                           # one diff side: rows appended to the `data` list
            if B.DATA_NODE_DATA not in tableDict:
                tableDict[B.DATA_NODE_DATA] = []
            tableDict[B.DATA_NODE_DATA].append(row)    # other side: rows stored inside the table dict
            setTabContent(msg, data, tableDict, h)
        elif state == 3:
            # short separator line after rows: flush collected data into output
            job.debug(verify, "structure " + str(state) + ": " + str(nodes))
            output = setSubnode(0, nodes, data, output)
            data = []
            state = 0
    if len(nodes) < 1:
        nodes.append(basename)   # fall back to the file's basename as node name
    output = setSubnode(0, nodes, data, output)   # one diff side's epilogue
    return output                                 # one diff side
    return data                                   # other side (unreachable after the return above)
|
|
|
|
|
|
|
|
|
|
|
def setSubnode(i, nodes, data, tree):
    # NOTE(review): only the opening of this function is visible in this diff
    # fragment; the remainder of its body lies beyond the hunk boundary.
    # Callers in this file invoke it as setSubnode(0, nodes, data, output),
    # so `i` is presumably a depth index into `nodes` and `tree` the
    # accumulator -- confirm against the full source.
    # NOTE(review): the doubled ': ' + ': ' in the debug print looks like a
    # value was dropped between them -- verify.
    print("setSubnode " + str(i) + ": " + ": " + str(tree))
|
|
@ -327,7 +349,8 @@ def normalizeDataRow(dstruct, xpathtupel, row, referencedate): |
|
|
|
verify = -1+job.getDebugLevel(TOOL_NAME) |
|
|
|
job.debug(verify, "calcDataRow " + row) |
|
|
|
|
|
|
|
def writeCsvData(filename, tdata, comp): |
|
|
|
|
|
|
|
def buildCsvData(filename, tdata, comp):
    """
    writes the testdata into a csv-file for documentation of the test-run

    :param teststatus:
    :param filename: path the caller will write the result to (not read here)
    :param tdata: one table dict; keys read here are the header node, the
        data node and the optional _date/_count attributes
    :param comp: component -- only logged in this visible fragment
    :return: the csv text built for this table
    """
    # NOTE(review): this span is a diff fragment; two hunk gaps fall inside the
    # function (after the docstring parameters, and inside the row loop below),
    # and BOTH diff sides of the `text` initialisation appear back to back.
    # The original closing quotes of the docstring were lost in the first gap
    # and have been restored above.
    job = basic.program.Job.getInstance()
    verify = -1+job.getDebugLevel(TOOL_NAME)
    job.debug(verify, "writeDataTable " + str(comp))
    text = "table"   # one diff side: header keyword first
    text = ""        # other side: attribute lines first, keyword appended later
    for k in [D.ATTR_TABLE_DATE, D.ATTR_TABLE_CNT]:
        # emit table-level attributes (_date, _count) ahead of the header line
        if k in tdata:
            text += k+";"+tdata[k]+"\n"
    text += "table"
    for f in tdata[B.DATA_NODE_HEADER]:
        text += ";"+f
    for r in tdata[B.DATA_NODE_DATA]:
        # NOTE(review): hunk gap here -- the start of the per-row emission,
        # including the `if` that matches the dangling `else` below, is not
        # visible in this fragment (not runnable as-is).
        else:
            # presumably: empty cell when the row has no value for a column
            text += ";"
        text += "\n"
    return text
|
|
|
|
|
|
|
|
|
|
|
def writeCsvData(filename, tdata, comp):
    """Render every table under the tables node of *tdata* to csv text and
    write the result to *filename*.

    Each table is rendered by buildCsvData; a newline is appended after each
    rendered block. The text is written via the file tool using the
    component's message object. If *tdata* has no tables node, an empty file
    content is written.
    """
    parts = []
    if B.DATA_NODE_TABLES in tdata:
        tables = tdata[B.DATA_NODE_TABLES]
        for table_name in tables:
            parts.append(buildCsvData(filename, tables[table_name], comp))
            parts.append("\n")
    utils.file_tool.writeFileText(comp.m, filename, "".join(parts))
|
|
|