From a95f8007339dcaeb59b4c58d1a2fa459f40be371 Mon Sep 17 00:00:00 2001 From: Ulrich Carmesin Date: Mon, 27 Jun 2022 23:26:01 +0200 Subject: [PATCH] table-attributes in readCsv and writeCsv --- test/test_config.py | 6 +-- test/test_tdata.py | 53 +++++++++++++++++++++++++- utils/data_const.py | 2 + utils/tdata_tool.py | 92 +++++++++++++++++++++++++++++++-------------- 4 files changed, 120 insertions(+), 33 deletions(-) diff --git a/test/test_config.py b/test/test_config.py index 44de3d7..8af2915 100644 --- a/test/test_config.py +++ b/test/test_config.py @@ -45,10 +45,10 @@ class MyTestCase(unittest.TestCase): global mymsg actfunction = str(inspect.currentframe().f_code.co_name) cnttest = 0 - if actfunction not in TEST_FUNCTIONS or True: + if actfunction not in TEST_FUNCTIONS: return job = test.testtools.getJob() - componentName = "testa" + componentName = "testcm" confs = utils.config_tool.getConfig("comp", componentName) conns = utils.conn_tool.getConnections(componentName) self.assertEqual(confs["conf"][B.SUBJECT_INST][B.ATTR_INST_CNT], 1) @@ -59,7 +59,7 @@ class MyTestCase(unittest.TestCase): cnttest += 1 # it overwrites self.assertEqual(confs["conf"][B.SUBJECT_INST][B.ATTR_INST_SGL], "n") cnttest += 1 # it keep - componentName = "testa1" + componentName = "testprddb" confs = utils.config_tool.getConfig("comp", componentName) conns = utils.conn_tool.getConnections(componentName) self.assertNotIn(B.ATTR_DB_TYPE, confs["conf"][B.SUBJECT_ARTS][B.TOPIC_NODE_DB]) diff --git a/test/test_tdata.py b/test/test_tdata.py index 4ace82a..b8637cb 100644 --- a/test/test_tdata.py +++ b/test/test_tdata.py @@ -3,9 +3,12 @@ import inspect import utils.tdata_tool as t import basic.constants as B import utils.data_const as D +import utils.path_const as P +import utils.config_tool import test.testtools import test.constants import basic.program +import utils.path_tool import os HOME_PATH = test.constants.HOME_PATH @@ -14,8 +17,8 @@ OS_SYSTEM = test.constants.OS_SYSTEM # here you can 
select single testfunction for developping the tests TEST_FUNCTIONS = ["test_tdata", "test_getCsvSpec_data", "test_getCsvSpec_tree", "test_getCsvSpec_key", - "test_getCsvSpec_conf", "test_extractPattern"] -#TEST_FUNCTIONS = ["test_getCsvSpec_key"] + "test_getCsvSpec_conf", "test_extractPattern", "test_parseCsv"] +#TEST_FUNCTIONS = ["test_parseCsv"] class MyTestCase(unittest.TestCase): mymsg = "--------------------------------------------------------------" @@ -249,6 +252,52 @@ class MyTestCase(unittest.TestCase): MyTestCase.mymsg += "\n----- "+actfunction+" : "+str(cnttest) + def test_parseCsv(self): + global mymsg + actfunction = str(inspect.currentframe().f_code.co_name) + cnttest = 0 + if actfunction not in TEST_FUNCTIONS: + return + job = test.testtools.getJob() + cm = basic.componentHandling.ComponentManager.getInstance("J") + componentName = "testcrmdb" + confs = utils.config_tool.getConfig("comp", componentName) + conns = utils.conn_tool.getConnections(componentName) + comp = cm.createInstance(componentName, None, confs, conns, 1) + fileLines = [ + "table:person;_nr;famname;name;birth;sex", + "testcrmdb:person;1;Brecht;Bert;10.02.98;m", + "testcrmdb:person;2;Leon;Donna;28.09.42;f", + "#;;;;;;" + ] + filename = utils.path_tool.composePath(P.P_TCBASE, "t_person.csv") + tdata = t.parseCsv(comp.m, filename, fileLines, comp, aliasNode="") + print(str(tdata)) + self.assertIn(B.DATA_NODE_TABLES, tdata) + self.assertIn("person", tdata[B.DATA_NODE_TABLES]) + self.assertEqual(2, len(tdata[B.DATA_NODE_TABLES]["person"][B.DATA_NODE_DATA])) + cnttest += 3 + fileLines = [ + "date;27.06.2022", + "count;2", + "table:person;_nr;famname;name;birth;sex", + "testcrmdb:person;1;Brecht;Bert;10.02.98;m", + "testcrmdb:person;2;Leon;Donna;28.09.42;f", + "#;;;;;;" + ] + tdata = t.parseCsv(comp.m, filename, fileLines, comp, aliasNode="") + self.assertIn(B.DATA_NODE_TABLES, tdata) + self.assertIn("person", tdata[B.DATA_NODE_TABLES]) + self.assertEqual(2, 
len(tdata[B.DATA_NODE_TABLES]["person"][B.DATA_NODE_DATA])) + cnttest += 3 + text = "" + for k in tdata[B.DATA_NODE_TABLES]: + text += t.buildCsvData(filename, tdata[B.DATA_NODE_TABLES][k], comp) + text += "\n" + print(text) + MyTestCase.mymsg += "\n----- "+actfunction+" : "+str(cnttest) + + def test_zzz(self): print(MyTestCase.mymsg) diff --git a/utils/data_const.py b/utils/data_const.py index ca6cf74..1080ca7 100644 --- a/utils/data_const.py +++ b/utils/data_const.py @@ -54,3 +54,5 @@ DEFAULT_DB_PARTITION = "n" DEFAULT_DB_CONN_JAR = "n" """ attribute for connection-jar-file instead of connection by ip, port """ +ATTR_TABLE_DATE = "date" +ATTR_TABLE_CNT = "count" diff --git a/utils/tdata_tool.py b/utils/tdata_tool.py index a43423a..6539a8d 100644 --- a/utils/tdata_tool.py +++ b/utils/tdata_tool.py @@ -237,33 +237,63 @@ def getTabContent(msg, data, path): pass def readCsv(msg, filename, comp, aliasNode=""): + lines = utils.file_tool.readFileLines(filename, msg) + return parseCsv(msg, filename, lines, comp, aliasNode) + + +def parseCsv(msg, filename, lines, comp, aliasNode=""): job = basic.program.Job.getInstance() - verify = -1+job.getDebugLevel(TOOL_NAME) - job.debug(verify, "readCsv " + filename) + verify = -4+job.getDebugLevel(TOOL_NAME) + job.debug(verify, "# # # # # # # # parseCsv " + filename + " :" + comp.name + ": " + str(lines)) fields = [] nodes = [] columns = [] output = {} state = 0 - data = [] + data = {} + tableDict = {} + tableDate = "" + tableCnt = 0 cnt = 0 - lines = utils.file_tool.readFileLines(filename, msg) basename = os.path.basename(filename)[0:-4] startCols = 1 for line in lines: fields = line.split(';') testline = line.replace(";", "") + a = fields[0].split(':') job.debug(verify, str(state) + " line " + line + " :" + str(len(fields)) + ": " + str(fields)) if len(testline) < 2 and state < 1: state = 0 - elif fields[0].lower() in D.CSV_HEADER_START: + elif a[0].lower() == D.ATTR_TABLE_DATE: + tableDate = fields[1] + elif a[0].lower() == 
D.ATTR_TABLE_CNT: + tableCnt = fields[1] + elif a[0].lower() in D.CSV_HEADER_START: state = 2 columns = [] + h = a cnt = len(fields) job.debug(verify, str(state) + " cnt " + str(cnt)) + data[B.DATA_NODE_TABLES] = {} + h[0] = B.DATA_NODE_TABLES + if not aliasNode.isspace() and len(aliasNode) > 3: + struct = aliasNode.split(":") + for x in struct: + if len(x) > 2: + nodes.append(x) + job.debug(verify, str(state) + " nodes " + str(nodes)) + elif len(h) > 1: + for i in range(1, len(h)): + nodes.append(h[i]) + job.debug(verify, str(state) + " nodes " + str(nodes)) + tableDict = getTabContent(msg, data, h) + if len(tableDate) > 6: + tableDict[D.ATTR_TABLE_DATE] = tableDate + if int(tableCnt) > 0: + tableDict[D.ATTR_TABLE_CNT] = tableCnt j = 0 for i in range(1, cnt): - if fields[0][0:1] == "_": + if fields[i][0:1] == "_": startCols += 1 continue job.debug(verify, str(i) + " cnt " + str(fields[i])) @@ -271,35 +301,27 @@ def readCsv(msg, filename, comp, aliasNode=""): columns.append(fields[i]) j = j + 1 cnt = j + tableDict[B.DATA_NODE_HEADER] = columns job.debug(verify, str(state) + " " + str(cnt) + " cols " + str(columns)) elif state >= 2 and len(testline) > 2: - if state == 2 and not aliasNode.isspace(): - struct = aliasNode.split(":") - for x in struct: - if len(x) > 2: - nodes.append(x) - job.debug(verify, str(state) + " nodes " + str(nodes)) - elif state == 2 and not fields[0].isspace(): - struct = fields[0].split(":") - for x in struct: - if len(x) > 2: - nodes.append(x) - job.debug(verify, str(state) + " nodes " + str(nodes)) + job.debug(verify, str(state) + " " + str(len(testline))) + tableDict = getTabContent(msg, data, h) state = 3 row = {} - for i in range(startCols, cnt): + for i in range(startCols, cnt+startCols): + if i >= len(columns)+startCols: + break row[columns[i-startCols]] = fields[i] job.debug(verify, str(state) + " row " + str(row)) - data.append(row) + if B.DATA_NODE_DATA not in tableDict: + tableDict[B.DATA_NODE_DATA] = [] + 
tableDict[B.DATA_NODE_DATA].append(row) + setTabContent(msg, data, tableDict, h) elif state == 3: job.debug(verify, "structure " + str(state) + ": " + str(nodes)) - output = setSubnode(0, nodes, data, output) - data = [] state = 0 - if len(nodes) < 1: - nodes.append(basename) - output = setSubnode(0, nodes, data, output) - return output + return data + def setSubnode(i, nodes, data, tree): print("setSubnode " + str(i) + ": " + ": " + str(tree)) @@ -327,7 +349,8 @@ def normalizeDataRow(dstruct, xpathtupel, row, referencedate): verify = -1+job.getDebugLevel(TOOL_NAME) job.debug(verify, "calcDataRow " + row) -def writeCsvData(filename, tdata, comp): + +def buildCsvData(filename, tdata, comp): """ writes the testdata into a csv-file for documentation of the test-run :param teststatus: @@ -338,7 +361,11 @@ def writeCsvData(filename, tdata, comp): job = basic.program.Job.getInstance() verify = -1+job.getDebugLevel(TOOL_NAME) job.debug(verify, "writeDataTable " + str(comp)) - text = "table" + text = "" + for k in [D.ATTR_TABLE_DATE, D.ATTR_TABLE_CNT]: + if k in tdata: + text += k+";"+tdata[k]+"\n" + text += "table" for f in tdata[B.DATA_NODE_HEADER]: text += ";"+f for r in tdata[B.DATA_NODE_DATA]: @@ -349,4 +376,13 @@ def writeCsvData(filename, tdata, comp): else: text += ";" text += "\n" + return text + + +def writeCsvData(filename, tdata, comp): + text = "" + if B.DATA_NODE_TABLES in tdata: + for k in tdata[B.DATA_NODE_TABLES]: + text += buildCsvData(filename, tdata[B.DATA_NODE_TABLES][k], comp) + text += "\n" utils.file_tool.writeFileText(comp.m, filename, text)