|
@@ -26,6 +26,7 @@ import basic.program
 import utils.file_tool
 import basic.constants as B
 import utils.data_const as D
+import utils.date_tool

 TOOL_NAME = "tdata_tool"
 """ name of the tool in order to switch debug-info on """
@@ -71,13 +72,24 @@ def getTestdata():
         # read file in testdata
         job.m.logInfo("Test-Data readed from " + tdata[D.ATTR_SRC_TYPE] + " for " + tdata[D.ATTR_SRC_NAME])
     elif tdata[D.ATTR_SRC_TYPE] == D.DATA_SRC_DIR:
-        filename = os.path.join(job.conf.getJobConf(B.SUBJECT_PATH+":"+B.D.ATTR_PATH_TDATA), tdata[D.ATTR_SRC_NAME], "testspec.csv")
+        path = os.path.join(job.conf.getJobConf(B.SUBJECT_PATH+":"+B.ATTR_PATH_TDATA), tdata[D.ATTR_SRC_NAME])
+        filename = os.path.join(path , "testspec.csv")
         data = getCsvSpec(job.m, filename, D.CSV_SPECTYPE_DATA)
         for k in data:
             tdata[k] = data[k]
             if (k == D.CSV_BLOCK_OPTION):
                 for p in data[k]:
                     setattr(job.par, p, data[k][p])
+        files = utils.file_tool.getFiles(job.m, path, "table_", None)
+        for f in files:
+            print(f)
+            filename = os.path.join(path, f)
+            data = readCsv(job.m, filename, None)
+            table = f[6:-4]
+            print(filename+" "+table)
+            if B.DATA_NODE_TABLES not in tdata:
+                tdata[B.DATA_NODE_TABLES] = {}
+            tdata[B.DATA_NODE_TABLES][table] = data[B.DATA_NODE_TABLES][table]
     else:
         job.m.setFatal("test-Data: reftyp " + tdata[D.ATTR_SRC_TYPE] + " is not implemented")
     return tdata
@@ -110,7 +122,7 @@ def parseCsvSpec(msg, lines, type):
     header = []
     h = [] # from a[]
     status = "start"
+    tableDate = utils.date_tool.getActdate(utils.date_tool.F_DE)
     tableDict = {}
     for l in lines:
         print("lines "+l)
@@ -145,12 +157,15 @@ def parseCsvSpec(msg, lines, type):
                 raise Exception(D.EXCP_MALFORMAT+""+l)
             data[a[0]][a[1]] = fields[1]
             continue
+        elif a[0].lower() == D.DATA_ATTR_DATE:
+            tableDate = fields[1]
         elif (a[0].lower() in D.CSV_HEADER_START):
             # create deep structure a_0 ... a_n
             print("tdata 136 CSV_HEADER_START "+str(len(a)))
             h = a
             data[B.DATA_NODE_TABLES] = {}
             h[0] = B.DATA_NODE_TABLES
+            comps = {}
             tableDict = getTabContent(msg, data, h)
             i = 0
             for f in fields:
@@ -171,6 +186,7 @@ def parseCsvSpec(msg, lines, type):
                 headerFields = []
             else:
                 tableDict[B.DATA_NODE_DATA] = []
+            tableDict[D.DATA_ATTR_DATE] = tableDate
             setTabContent(msg, data, tableDict, h)
             status = D.CSV_SPECTYPE_DATA
             continue
@@ -187,6 +203,15 @@ def parseCsvSpec(msg, lines, type):
                     tableDict[B.DATA_NODE_DATA][f] = fields[i]
                 i += 1
             if type == D.CSV_SPECTYPE_DATA:
+                print("parseSpec "+ str(fields[0]))
+                for c in fields[0].split(","):
+                    a = c.split(":")
+                    print("parseSpec " + str(a))
+                    comps[a[0]] = a[1]
+                    row[B.ATTR_DATA_COMP] = {}
+                    row[B.ATTR_DATA_COMP][a[0]] = a[1]
+                #row[B.ATTR_DATA_COMP] = fields[0].split(",")
+                tableDict[B.ATTR_DATA_COMP] = comps
                 tableDict[B.DATA_NODE_DATA].append(row)
             elif type == D.CSV_SPECTYPE_KEYS:
                 tableDict[D.CSV_NODETYPE_KEYS][fields[1]] = row
@@ -205,6 +230,13 @@ def parseCsvSpec(msg, lines, type):
     return data


+def mergeTableComponents(comps, rowComps):
+    for c in rowComps.split(","):
+        a = c.split(":")
+        comps[a[0]] = a[1]
+    return comps
+
+
 def setTabContent(msg, data, tabledata, path):
     if len(path) >= 2 and path[1] not in data[path[0]]:
         data[path[0]][path[1]] = {}
@@ -238,13 +270,15 @@ def getTabContent(msg, data, path):

 def readCsv(msg, filename, comp, aliasNode=""):
     lines = utils.file_tool.readFileLines(filename, msg)
+    print("readCsv "+filename)
+    print(lines)
     return parseCsv(msg, filename, lines, comp, aliasNode)


 def parseCsv(msg, filename, lines, comp, aliasNode=""):
     job = basic.program.Job.getInstance()
     verify = -4+job.getDebugLevel(TOOL_NAME)
-    job.debug(verify, "# # # # # # # # parseCsv " + filename + " :" + comp.name + ": " + str(lines))
+    job.debug(verify, "# # # # # # # # parseCsv " + filename + " :" + str(lines))
     fields = []
     nodes = []
     columns = []
@ -264,14 +298,22 @@ def parseCsv(msg, filename, lines, comp, aliasNode=""): |
|
|
job.debug(verify, str(state) + " line " + line + " :" + str(len(fields)) + ": " + str(fields)) |
|
|
job.debug(verify, str(state) + " line " + line + " :" + str(len(fields)) + ": " + str(fields)) |
|
|
if len(testline) < 2 and state < 1: |
|
|
if len(testline) < 2 and state < 1: |
|
|
state = 0 |
|
|
state = 0 |
|
|
elif a[0].lower() == D.ATTR_TABLE_DATE: |
|
|
elif a[0].lower() == D.DATA_ATTR_DATE: |
|
|
tableDate = fields[1] |
|
|
tableDate = fields[1] |
|
|
elif a[0].lower() == D.ATTR_TABLE_CNT: |
|
|
state = 1 |
|
|
|
|
|
elif a[0].lower() == D.DATA_ATTR_COUNT: |
|
|
tableCnt = fields[1] |
|
|
tableCnt = fields[1] |
|
|
elif a[0].lower() in D.CSV_HEADER_START: |
|
|
state = 1 |
|
|
|
|
|
elif a[0].lower() in D.CSV_HEADER_START or \ |
|
|
|
|
|
(comp is not None and state == 1 |
|
|
|
|
|
and isCompTableFile(comp, filename)): |
|
|
state = 2 |
|
|
state = 2 |
|
|
columns = [] |
|
|
columns = [] |
|
|
h = a |
|
|
h = a |
|
|
|
|
|
if len(h) < 2 and comp is not None: |
|
|
|
|
|
a = ["table", basename] |
|
|
|
|
|
h = a |
|
|
|
|
|
startCols = 0 |
|
|
cnt = len(fields) |
|
|
cnt = len(fields) |
|
|
job.debug(verify, str(state) + " cnt " + str(cnt)) |
|
|
job.debug(verify, str(state) + " cnt " + str(cnt)) |
|
|
data[B.DATA_NODE_TABLES] = {} |
|
|
data[B.DATA_NODE_TABLES] = {} |
|
@ -287,10 +329,11 @@ def parseCsv(msg, filename, lines, comp, aliasNode=""): |
|
|
nodes.append(h[i]) |
|
|
nodes.append(h[i]) |
|
|
job.debug(verify, str(state) + " nodes " + str(nodes)) |
|
|
job.debug(verify, str(state) + " nodes " + str(nodes)) |
|
|
tableDict = getTabContent(msg, data, h) |
|
|
tableDict = getTabContent(msg, data, h) |
|
|
|
|
|
tableDict[B.ATTR_DATA_COMP] = {} |
|
|
if len(tableDate) > 6: |
|
|
if len(tableDate) > 6: |
|
|
tableDict[D.ATTR_TABLE_DATE] = tableDate |
|
|
tableDict[D.DATA_ATTR_DATE] = tableDate |
|
|
if int(tableCnt) > 0: |
|
|
if int(tableCnt) > 0: |
|
|
tableDict[D.ATTR_TABLE_CNT] = tableCnt |
|
|
tableDict[D.DATA_ATTR_COUNT] = tableCnt |
|
|
j = 0 |
|
|
j = 0 |
|
|
for i in range(1, cnt): |
|
|
for i in range(1, cnt): |
|
|
if fields[i][0:1] == "_": |
|
|
if fields[i][0:1] == "_": |
|
@ -308,7 +351,14 @@ def parseCsv(msg, filename, lines, comp, aliasNode=""): |
|
|
tableDict = getTabContent(msg, data, h) |
|
|
tableDict = getTabContent(msg, data, h) |
|
|
state = 3 |
|
|
state = 3 |
|
|
row = {} |
|
|
row = {} |
|
|
|
|
|
print(line) |
|
|
|
|
|
if startCols > 0: |
|
|
|
|
|
row[B.ATTR_DATA_COMP] = {} |
|
|
|
|
|
row[B.ATTR_DATA_COMP][a[0]] = a[1] |
|
|
|
|
|
tableDict[B.ATTR_DATA_COMP][a[0]] = a[1] |
|
|
for i in range(startCols, cnt+startCols): |
|
|
for i in range(startCols, cnt+startCols): |
|
|
|
|
|
print("for "+str(i)+" "+str(len(row))+" "+str(startCols)+" "+str(len(fields))) |
|
|
|
|
|
print(str(fields[i])) |
|
|
if i >= len(columns)+startCols: |
|
|
if i >= len(columns)+startCols: |
|
|
break |
|
|
break |
|
|
row[columns[i-startCols]] = fields[i] |
|
|
row[columns[i-startCols]] = fields[i] |
|
@@ -358,25 +408,38 @@ def buildCsvData(filename, tdata, comp):
     :param comp: if specific else None
     :return:
     """
+    compColumn = not isCompTableFile(comp, filename)
     job = basic.program.Job.getInstance()
     verify = -1+job.getDebugLevel(TOOL_NAME)
     job.debug(verify, "writeDataTable " + str(comp))
     text = ""
-    for k in [D.ATTR_TABLE_DATE, D.ATTR_TABLE_CNT]:
+    for k in [D.DATA_ATTR_DATE, D.DATA_ATTR_COUNT]:
         if k in tdata:
-            text += k+";"+tdata[k]+"\n"
-    text += "table"
+            text += k+";"+str(tdata[k])+"\n"
+    header = "table"
     for f in tdata[B.DATA_NODE_HEADER]:
-        text += ";"+f
+        header += ";"+f
+    if compColumn:
+        text += header
+    else:
+        #text += "_nr;" + header[6:] + "\n"
+        text += header[6:] + "\n"
+    i = 0
     for r in tdata[B.DATA_NODE_DATA]:
-        text += "\n"
+        row = ""
+        i += 1
         for f in tdata[B.DATA_NODE_HEADER]:
             if f in r:
-                text += ";"+str(r[f])
+                row += ";"+str(r[f])
             else:
-                text += ";"
-    text += "\n"
-    return text
+                row += ";"
+        if compColumn:
+            text += row
+        else:
+            text += row[1:]
+            #text += str(i) + row
+        text += "\n"
+    return text


 def writeCsvData(filename, tdata, comp):
@@ -386,3 +449,14 @@ def writeCsvData(filename, tdata, comp):
         text += buildCsvData(filename, tdata[B.DATA_NODE_TABLES][k], comp)
         text += "\n"
     utils.file_tool.writeFileText(comp.m, filename, text)
+
+
+def isCompTableFile(comp, filename):
+    """ check if the filename belongs to the component """
+    basetable = os.path.basename(filename)[0:-4]
+    if comp is None:
+        return False
+    if B.TOPIC_NODE_DB in comp.conf[B.SUBJECT_ARTS] and basetable in comp.conf[B.DATA_NODE_DDL] \
+            and comp.name in filename:
+        return True
+    return False