
csv-files

refactor
Ulrich committed 2 years ago
parent commit 2760f83c8a
  1. test/test_31filecsv.py (103 lines changed)
  2. tools/file_tool.py (5 lines changed)
  3. tools/filecsv_fcts.py (85 lines changed)

test/test_31filecsv.py (103 lines changed)

@@ -29,10 +29,10 @@ OS_SYSTEM = test.constants.OS_SYSTEM
 # the list of TEST_FUNCTIONS defines which function will be really tested.
 # if you minimize the list you can check the specific test-function
 TEST_FUNCTIONS = ["test_11ddl", "test_12catalog",
-                  "test_02getCsvSpec_data", "test_03getCsvSpec_tree", "test_04getCsvSpec_key",
-                  "test_05getCsvSpec_conf", "test_06parseCsv"]
-TEST_FUNCTIONS = ["test_12catalog"]
-TEST_FUNCTIONS = ["test_11ddl"]
+                  "test_02getCsvSpec_data", "test_03getCsvSpec_tree", "test_14getCsvSpec_key",
+                  "test_15getCsvSpec_conf", "test_06parseCsv"]
+TEST_FUNCTIONS = ["test_11ddl", "test_12catalog", "test_14getCsvSpec_key", "test_15getCsvSpec_conf"]
+TEST_FUNCTIONS = ["test_02getCsvSpec_data"]
 PROGRAM_NAME = "clean_workspace"
 # with this variable you can switch prints on and off
@@ -142,7 +142,7 @@ class MyTestCase(unittest.TestCase):
         cnttest = 0
         if actfunction not in TEST_FUNCTIONS:
             return
-        job = basic.program.SimpleJob(PROGRAM_NAME)
+        job = test.testtools.getWorkspaceJob(PROGRAM_NAME)
         setattr(job.par, "tdtyp", "dir")
         setattr(job.par, "tdsrc", "TC0001")
         setattr(job.par, "tdname", "testspec")
@@ -183,7 +183,7 @@ class MyTestCase(unittest.TestCase):
         cnttest = 0
         if actfunction not in TEST_FUNCTIONS:
             return
-        job = test.testtools.getJob()
+        job = test.testtools.getWorkspaceJob(PROGRAM_NAME)
         # filename = os.path.join(job.conf["paths"]["testdata"], getattr(job.par, "tdsrc"), getattr(job.par, "tdname") + ".csv")
         """
         a) data : like a table with data-array of key-value-pairs
@@ -199,7 +199,7 @@ class MyTestCase(unittest.TestCase):
         ]
         f = toolHandling.getFileTool(job, None, "csv")
         tdata = f.parseCsv(job.m, job, specLines, D.CSV_SPECTYPE_DATA)
-        self.assertEqual(0, len(tdata))
+        self.assertEqual(1, len(tdata))
         cnttest += 1
         if "malformated" in tests:
             malformat = "option;arg;;;;;"
@@ -235,49 +235,55 @@ class MyTestCase(unittest.TestCase):
             "option:description;something;;;;;",
             "#;;;;;;"
         ]
-        tdata = f.parseCsvSpec(job.m, specLines, D.CSV_SPECTYPE_DATA, {}, job)
-        self.assertEqual(1, len(tdata))
+        tdata = f.parseCsv(job.m, job, specLines, D.CSV_SPECTYPE_DATA)
+        self.assertEqual(2, len(tdata))
         print(tdata)
         self.assertIn(D.CSV_BLOCK_OPTION, tdata)
         cnttest += 2
         if D.CSV_BLOCK_STEP in tests:
             specLines = [
-                "step:1;testa;1;1;table:_lofts,action:import;;;;;",
+                # "step:1;testa;1;1;table:_lofts,action:import;;;;;",
+                "step:header;_nr;variant;data;program;comp;args;;;;;;",
+                ";1;testa;person:1;execute_testcase;testrest;action:import;;;;;",
                 "#;;;;;;"
             ]
-            tdata = f.parseCsvSpec(job.m, specLines, D.CSV_SPECTYPE_DATA, {}, job)
+            tdata = f.parseCsv(job.m, job, specLines, D.CSV_SPECTYPE_DATA)
             print(tdata)
-            self.assertEqual(1, len(tdata))
+            self.assertEqual(2, len(tdata))
             self.assertIn(B.DATA_NODE_STEPS, tdata)
-            self.assertIsInstance(tdata[B.DATA_NODE_STEPS], list)
+            self.assertIsInstance(tdata[B.DATA_NODE_STEPS], dict)
+            self.assertIsInstance(tdata[B.DATA_NODE_STEPS][B.DATA_NODE_DATA], list)
             cnttest += 3
-            for step in tdata[B.DATA_NODE_STEPS]:
+            for step in tdata[B.DATA_NODE_STEPS][B.DATA_NODE_DATA]:
                 print(step)
-                self.assertEqual(hasattr(step, B.DATA_NODE_COMP), True)
-                # self.assertEqual(hasattr(step, B.ATTR_DATA_REF), True)
-                self.assertEqual(hasattr(step, B.ATTR_STEP_ARGS), True)
+                self.assertIn(B.DATA_NODE_COMP, step)
+                self.assertIn(B.DATA_NODE_ARGS, step)
                 cnttest += 3
             specLines = [
-                "step:1;testa;1;1;table:_lofts;action:export;;;;;",
-                "#;;;;;;"
+                "step:header;_nr;variant;data;program;comp;args;;;;;;",
+                ";1;testa;person:1;execute_testcase;testrest;action:import;var:xyz;;;;"
             ]
             tdata = {}
-            tdata = f.parseCsvSpec(job.m, specLines, D.CSV_SPECTYPE_DATA, {}, job)
+            tdata = f.parseCsv(job.m, job, specLines, D.CSV_SPECTYPE_DATA)
             print(tdata)
-            self.assertEqual(1, len(tdata))
+            self.assertEqual(2, len(tdata))
             self.assertIn(B.DATA_NODE_STEPS, tdata)
-            self.assertIsInstance(tdata[B.DATA_NODE_STEPS], list)
-            self.assertEqual(2, len(tdata[B.DATA_NODE_STEPS][0].args))
+            self.assertIsInstance(tdata[B.DATA_NODE_STEPS], dict)
+            self.assertEqual(2, len(tdata[B.DATA_NODE_STEPS][B.DATA_NODE_DATA][0][B.DATA_NODE_ARGS]))
             cnttest += 3
-        if B.DATA_NODE_TABLES in tests:
+            text = f.buildCsv(job.m, job, tdata)
+            spez = "_type;data\n"+self.stripDelimiter(specLines)
+            self.assertEqual(spez, text)
+            print(text)
+        if B.DATA_NODE_TABLES in tdata:
             specLines = [
                 "table:testa:lofts;_nr;street;city;zip;state;beds;baths;sqft;type;price;latitude;longitude",
                 "testa:lofts;1;stra;town;12345;usa;4;1;50;house;111;45;8",
                 "#;;;;;;"
             ]
-            tdata = f.parseCsvSpec(job.m, specLines, B.DATA_NODE_TABLES, {}, job)
+            tdata = f.parseCsv(job.m, job, specLines, D.CSV_SPECTYPE_DATA)
             print(tdata)
-            self.assertEqual(1, len(tdata))
+            self.assertEqual(2, len(tdata))
             self.assertIn(B.DATA_NODE_TABLES, tdata)
             self.assertIsInstance(tdata[B.DATA_NODE_TABLES], dict)
             cnttest += 3
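
Taken together, the new assertions pin down the shape parseCsv now produces for a step block. The following is an illustrative sketch only, not repo code; it assumes the constant values B.DATA_NODE_STEPS == "_steps", B.DATA_NODE_HEADER == "_header", B.DATA_NODE_DATA == "_data", B.DATA_NODE_COMP == "comp" and B.DATA_NODE_ARGS == "args", plus a "_type" attribute written back by the parser:

tdata = {
    "_type": "data",                              # assumed attribute key
    "_steps": {
        "_header": ["_nr", "variant", "data", "program", "comp", "args"],
        "_data": [{
            "_nr": "1", "variant": "testa", "data": "person:1",
            "program": "execute_testcase", "comp": "testrest",
            "args": {"action": "import", "var": "xyz"},
        }],
    },
}
assert len(tdata) == 2                            # attribute node + steps node
assert isinstance(tdata["_steps"], dict)
assert isinstance(tdata["_steps"]["_data"], list)
assert len(tdata["_steps"]["_data"][0]["args"]) == 2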
@@ -301,12 +307,24 @@ class MyTestCase(unittest.TestCase):
             "testrest:person;1;Brecht;Bert;10.02.98;m",
             "testrest:person,testcrmdb:person;2;Leon;Donna;28.09.42;f"
         ]
-        tdata = f.parseCsvSpec(job.m, specLines, D.CSV_SPECTYPE_DATA, {}, job)
+        tdata = f.parseCsv(job.m, job, specLines, D.CSV_SPECTYPE_DATA)
         print(tdata)
         MyTestCase.mymsg += "\n----- "+actfunction+" : "+str(cnttest)

+    def stripDelimiter(self, lines):
+        out = ""
+        for l in lines:
+            if len(l) > 0 and l[0:1] == "#":
+                continue
+            while len(l) > 1 and l[-1:] == ";":
+                l = l[:-1]
+            out += "\n" + l
+        if len(out) > 0:
+            out = out[1:]
+        return out
+
     def test_03getCsvSpec_tree(self):
         # TODO : Baumstruktur fuer properties
         global mymsg
         actfunction = str(inspect.currentframe().f_code.co_name)
         cnttest = 0
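
The new stripDelimiter helper normalizes a spec list for the round-trip comparison against buildCsv: comment lines beginning with "#" are dropped, trailing ";" padding is stripped, and the remainder is joined with newlines. Illustrative use with the step lines from test_02 (inside a test method):

lines = [
    "step:header;_nr;variant;data;program;comp;args;;;;;;",
    ";1;testa;person:1;execute_testcase;testrest;action:import;var:xyz;;;;",
    "#;;;;;;",
]
out = self.stripDelimiter(lines)
# out == "step:header;_nr;variant;data;program;comp;args\n" \
#        ";1;testa;person:1;execute_testcase;testrest;action:import;var:xyz"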
@@ -330,16 +348,17 @@ class MyTestCase(unittest.TestCase):
             ";Meldung_senden;Mock;1;cli;must:;;",
             ";Batche_starten_stopen;Mock;1;api;must:;;"
         ]
+        job = test.testtools.getWorkspaceJob(PROGRAM_NAME)
         MyTestCase.mymsg += "\n----- "+actfunction+" : "+str(cnttest)

-    def test_04getCsvSpec_key(self):
+    def test_14getCsvSpec_key(self):
         global mymsg
         actfunction = str(inspect.currentframe().f_code.co_name)
         cnttest = 0
         if actfunction not in TEST_FUNCTIONS:
             return
-        job = test.testtools.getJob()
+        job = test.testtools.getWorkspaceJob(PROGRAM_NAME)
         """"
         c) keys : as a tree - the rows must be unique identified by the first column
         a_0 is keyword in CSV_HEADER_START
@@ -353,8 +372,8 @@ class MyTestCase(unittest.TestCase):
             "#;;;;;;"
         ]
         f = toolHandling.getFileTool(job, None, "csv")
-        tdata = f.parseCsv(job.m, specLines, D.CSV_SPECTYPE_CONF, {}, job)
-        self.assertEqual(0, len(tdata))
+        tdata = f.parseCsv(job.m, job, specLines, D.CSV_SPECTYPE_KEYS)
+        self.assertEqual(1, len(tdata))
         cnttest += 1
         if "malformated" in tests:
             malformat = "table;key;;;;;"
@@ -373,9 +392,9 @@ class MyTestCase(unittest.TestCase):
         ]
         tdata = f.parseCsv(job.m, job, specLines, D.CSV_SPECTYPE_KEYS)
         print(str(tdata))
-        self.assertEqual(1, len(tdata))
+        self.assertEqual(2, len(tdata))
         self.assertEqual(1, len(tdata["_tables"]))
-        self.assertEqual(4, len(tdata["_tables"]["capital"]))
+        self.assertEqual(3, len(tdata["_tables"]["capital"]))
         self.assertEqual(3, len(tdata["_tables"]["capital"]["_keys"]))
         cnttest += 4
         specLines = [
@@ -389,23 +408,22 @@ class MyTestCase(unittest.TestCase):
         tdata = f.parseCsv(job.m, job, specLines, D.CSV_SPECTYPE_KEYS)
         #tdata = f.parseCsvSpec(job.m, specLines, D.CSV_SPECTYPE_TREE)
         print(str(tdata))
-        self.assertEqual(1, len(tdata))
+        self.assertEqual(2, len(tdata))
         self.assertIn("capital", tdata["_tables"])
         self.assertEqual(2, len(tdata["_tables"]))
-        self.assertEqual(4, len(tdata["_tables"]["country"]))
+        self.assertEqual(3, len(tdata["_tables"]["country"]))
         self.assertEqual(2, len(tdata["_tables"]["country"]["_keys"]))
         cnttest += 4
         MyTestCase.mymsg += "\n----- "+actfunction+" : "+str(cnttest)

-    def test_05getCsvSpec_conf(self):
+    def test_15getCsvSpec_conf(self):
         global mymsg
         actfunction = str(inspect.currentframe().f_code.co_name)
         cnttest = 0
         if actfunction not in TEST_FUNCTIONS:
             return
-        job = basic.program.SimpleJob(PROGRAM_NAME)
-        # job = test.testtools.getJob()
+        job = test.testtools.getWorkspaceJob(PROGRAM_NAME)
         f = toolHandling.getFileTool(job, None, "csv")
         """"
         d) conf:
@@ -421,11 +439,11 @@ class MyTestCase(unittest.TestCase):
             "#;;;;;;"
         ]
         specLinesB = [
-            "_type;conf;;;;;;",
+            "_type;conf",
             "table:lofts;_field;field;type;acceptance;key",
-            "lofts;street;a;str;;T:1",
+            ";street;a;str;;T:1",
             ";city;b;str;;F:1",
-            "#;;;;;;"
+            ""
         ]
         tdata = f.parseCsv(job.m, job, specLinesA, D.CSV_SPECTYPE_CONF)
         self.assertEqual(2, len(tdata))
@@ -447,6 +465,7 @@ class MyTestCase(unittest.TestCase):
         returnLines = f.buildCsv(job.m, job, tdata, D.CSV_SPECTYPE_CONF)
         print("returnLines:")
         print(returnLines)
+        self.assertEqual("\n".join(specLinesB), returnLines)
         MyTestCase.mymsg += "\n----- "+actfunction+" : "+str(cnttest)
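
The strengthened assertion makes the conf round trip exact: buildCsv output must equal specLinesB joined with newlines. Because the rewritten specLinesB ends with an empty string, the expected text carries a terminating newline; a sketch of the comparison value:

expected = "\n".join([
    "_type;conf",
    "table:lofts;_field;field;type;acceptance;key",
    ";street;a;str;;T:1",
    ";city;b;str;;F:1",
    "",
])
# returnLines from f.buildCsv(job.m, job, tdata, D.CSV_SPECTYPE_CONF)
# must now match this string exactly.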
@@ -456,7 +475,7 @@ class MyTestCase(unittest.TestCase):
         cnttest = 0
         if actfunction not in TEST_FUNCTIONS:
             return
-        job = test.testtools.getJob()
+        job = test.testtools.getWorkspaceJob(PROGRAM_NAME)
         f = toolHandling.getFileTool(job, None, "csv")
         cm = basic.componentHandling.ComponentManager.getInstance(job)
         componentName = "testcrmdb"

tools/file_tool.py (5 lines changed)

@@ -267,7 +267,7 @@ def write_tile_text(msg, job, path, text, enc="utf-8"):
     file.close()

-def write_file_dict(msg, job, path, dict, enc="utf-8"):
+def write_file_dict(msg, job, path, dict, enc="utf-8", ttype=""):
     # job = basic.program.Job.getInstance()
     mkPaths(job, path, msg)
     if D.DFILE_TYPE_YML in path[-5:]:
@@ -290,4 +290,5 @@ def write_file_dict(msg, job, path, dict, enc="utf-8"):
         print("fileWriter fuer csv")
         ffcts = basic.toolHandling.getFileTool(job, None, D.DFILE_TYPE_CSV)
         #doc = tools.tdata_tool.getCsvSpec(msg, job, path, D.CSV_SPECTYPE_CONF)
-        doc = ffcts.dump_file(dict, path)
+        doc = ffcts.dump_file(dict, path, ttype)
     write_tile_text(msg, job, path, doc, enc)
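
With the new parameter, the spec type is threaded from write_file_dict through dump_file into buildCsv, so the CSV writer knows which layout (conf, data, ...) to emit. A hypothetical call site, reusing the constant from the tests above (the path is made up for illustration):

tools.file_tool.write_file_dict(job.m, job, "/tmp/lofts.csv", tdata,
                                ttype=D.CSV_SPECTYPE_CONF)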

tools/filecsv_fcts.py (85 lines changed)

@@ -28,8 +28,9 @@ class FileFcts(tools.file_abstract.FileFcts):
         lines = tools.file_tool.read_file_lines(self.job, path, self.getMsg())
         return self.parseCsv(self.getMsg(), self.job, lines, ttype)

-    def dump_file(self, data, path):
-        text = self.buildCsv(self.getMsg(), self.job, data)
+    def dump_file(self, data, path, ttype=""):
+        text = self.buildCsv(self.getMsg(), self.job, data, ttype)
         return text

     def parseCsv_alt(self, msg, job, lines, ttype=""):
         """
@@ -199,21 +200,22 @@ class FileFcts(tools.file_abstract.FileFcts):
                 if ttype == "" and D.DATA_ATTR_TYPE in tableAttr:
                     ttype = tableAttr[D.DATA_ATTR_TYPE]
                 continue
-            elif self.isBlock(msg, job, fields[0], D.CSV_BLOCK_HEAD, status):  # (a[0].lower() == D.CSV_BLOCK_OPTION):
+            elif self.isBlock(msg, job, fields[0], D.CSV_BLOCK_HEAD, status):
                 setTdataLine(tdata, fields, D.CSV_BLOCK_HEAD, job)
                 status = "start"
                 continue
-            elif self.isBlock(msg, job, fields[0], D.CSV_BLOCK_OPTION, status):  # (a[0].lower() == D.CSV_BLOCK_OPTION):
+            elif self.isBlock(msg, job, fields[0], D.CSV_BLOCK_OPTION, status):
                 setTdataLine(tdata, fields, D.CSV_BLOCK_OPTION, job)
                 status = "start"
                 continue
-            elif self.isBlock(msg, job, fields[0], D.CSV_BLOCK_STEP, status):  # (a[0].lower() == D.CSV_BLOCK_OPTION):
+            elif (status != D.CSV_BLOCK_STEP) \
+                    and self.isBlock(msg, job, fields[0], D.CSV_BLOCK_STEP, status):
+                h = []
+                h.append(B.DATA_NODE_STEPS)
                 if verbose: print(">> step "+l)
-                step = basic.step.parseStep(job, fields)
-                if D.CSV_BLOCK_STEP not in tdata:
-                    tdata[D.CSV_BLOCK_STEP] = []
-                tdata[D.CSV_BLOCK_STEP].append(step)
-                status = "step"
+                tableDict = getTdataContent(msg, tdata, h)
+                setTableHeader(tableDict, tableAttr, fields, ttype, job)
+                status = D.CSV_BLOCK_STEP
                 continue
             elif self.isBlock(msg, job, fields[0], D.CSV_BLOCK_TABLES, status):
                 if verbose: print(">> tables " + l)
@@ -228,6 +230,16 @@ class FileFcts(tools.file_abstract.FileFcts):
                 tableDict = getTdataContent(msg, tdata, h)
                 if verbose: print(">> setTableData " + str(h) + " " + str(tableDict))
                 setTableData(tableDict, fields, ttype, job)
+            elif (status == D.CSV_BLOCK_STEP):
+                print("step-line "+status+": "+l)
+                h = []
+                h.append(B.DATA_NODE_STEPS)
+                tableDict = getTdataContent(msg, tdata, h)
+                if verbose: print(">> setTableData " + str(h) + " " + str(tableDict))
+                setTableData(tableDict, fields, ttype, job)
+                #tableDict = getTdataContent(msg, tdata, h)
+                #if verbose: print(">> setTableData " + str(h) + " " + str(tableDict))
+                #setTableData(tableDict, fields, ttype, job)

         if D.DATA_ATTR_TYPE not in tableAttr:
             tableAttr[D.DATA_ATTR_TYPE] = ttype
@@ -246,12 +258,9 @@ class FileFcts(tools.file_abstract.FileFcts):
                 tdata[k] = tableAttr[k]
         if ttype == D.CSV_SPECTYPE_CONF:
             fields = []
-            print(str(tdata))
             for k in tdata:
-                print("k "+k)
                 if k in ["_hit"] + D.LIST_DATA_ATTR:
                     continue
-                print("k "+k)
                 if B.DATA_NODE_DATA in tdata[k]:
                     tdata[k].pop(B.DATA_NODE_DATA)
                 for f in tdata[k]:
@@ -313,12 +322,25 @@ class FileFcts(tools.file_abstract.FileFcts):
                     continue
                 out += buildHeader(job, data[k][B.DATA_NODE_HEADER], k)
                 out += buildCtlg(job, data[k][B.DATA_NODE_HEADER], data[k][B.DATA_NODE_KEYS])
-        elif data[D.DATA_ATTR_TYPE] == D.CSV_SPECTYPE_DDL:
+        elif data[D.DATA_ATTR_TYPE] in [D.CSV_SPECTYPE_DDL, D.CSV_SPECTYPE_CONF]:
             for k in data:
                 if k in D.LIST_DATA_ATTR:
                     continue
                 out += buildHeader(job, data[k][B.DATA_NODE_HEADER], k)
                 out += buildCtlg(job, data[k][B.DATA_NODE_HEADER], data[k])
+        if B.DATA_NODE_STEPS in data:
+            out += "step:header"
+            for h in data[B.DATA_NODE_STEPS][B.DATA_NODE_HEADER]:
+                out += delimiter + h
+            out += "\n"
+            for row in data[B.DATA_NODE_STEPS][B.DATA_NODE_DATA]:
+                for h in data[B.DATA_NODE_STEPS][B.DATA_NODE_HEADER]:
+                    if h in [B.DATA_NODE_ARGS, "args"]:
+                        for arg in row[B.DATA_NODE_ARGS]:
+                            out += delimiter + arg + ":" + row[B.DATA_NODE_ARGS][arg]
+                    else:
+                        out += delimiter + row[h]
         if len(out) > 0:
             return out
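
The added branch serializes the steps node back into the same layout: one "step:header" line listing the header fields, then one line per data row, with every args entry emitted as its own key:value column. That is what the round-trip assertion in test_02 relies on; for the single-row spec above the expected result is roughly:

text = f.buildCsv(job.m, job, tdata)
# text == "_type;data\n" \
#         "step:header;_nr;variant;data;program;comp;args\n" \
#         ";1;testa;person:1;execute_testcase;testrest;action:import;var:xyz"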
@@ -363,11 +385,12 @@ def buildCtlg(job, header, table):
     for k in table:
         if k in D.LIST_DATA_ATTR:
             continue
-        if k in [B.DATA_NODE_HEADER, B.DATA_NODE_DATA, "_hit"]:
+        if k in [B.DATA_NODE_HEADER, B.DATA_NODE_FIELDS, B.DATA_NODE_DATA, "_hit"]:
             continue
         for h in header:
-            print("k "+k+" h "+h+" typvar "+str(type(table[k][h])))
-            if isinstance(table[k][h], dict):
+            if h not in table[k]:
+                out += D.CSV_DELIMITER
+            elif isinstance(table[k][h], dict):
                 text = json.dumps(table[k][h])
                 out += "\"" + text + "\""
             else:
@@ -480,6 +503,8 @@ def getTdataContent(msg, data, path):
 def setTableHeader(tableDict, tableAttr, fields, ttype, job):
     header = []
     for i in range(1, len(fields)):
+        if len(fields[i].strip()) < 1:
+            continue
         header.append(fields[i].strip())
     tableDict[B.DATA_NODE_HEADER] = header
     for attr in tableAttr:
@@ -502,20 +527,24 @@ def setTableData(tableDict, fields, ttype, job):
         fields = [tableDict[D.DATA_ATTR_ALIAS]] + fields
     i = 1
     for f in tableDict[B.DATA_NODE_HEADER]:
-        row[f] = fields[i]
+        if f in [B.DATA_NODE_ARGS, "args"]:
+            arguments = {}
+            row[B.DATA_NODE_ARGS] = arguments
+        if B.DATA_NODE_ARGS in row:
+            a = fields[i].split(":")
+            row[B.DATA_NODE_ARGS][a[0]] = a[1]
+        else:
+            row[f] = fields[i]
         i += 1
+    ln = len(tableDict[B.DATA_NODE_HEADER])
+    for arg in fields[len(tableDict[B.DATA_NODE_HEADER])+1:]:
+        if len(arg) == 0 or arg.strip()[0:1] == "#":
+            continue
+        print("arg "+arg)
+        a = arg.split(":")
+        row[B.DATA_NODE_ARGS][a[0]] = a[1]
     if ttype == D.CSV_SPECTYPE_DATA:
+        if B.ATTR_DATA_COMP in tableDict:
+            tcomps = tableDict[B.ATTR_DATA_COMP]
+        else:
+            tcomps = {}
         row[B.ATTR_DATA_COMP] = {}
         for c in fields[0].split(","):
             a = c.split(":")
+            tcomps[a[0]] = a[1]
             row[B.ATTR_DATA_COMP][a[0]] = a[1].strip()
         tableDict[B.DATA_NODE_DATA].append(row)
+        tableDict[B.ATTR_DATA_COMP] = tcomps
     elif ttype in [D.CSV_SPECTYPE_KEYS, D.CSV_SPECTYPE_CTLG]:
         tableDict[D.CSV_NODETYPE_KEYS][fields[tableDict[D.DATA_ATTR_KEY]].strip()] = row
     elif ttype in [D.CSV_SPECTYPE_CONF, D.CSV_SPECTYPE_DDL]:
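
A simplified, self-contained re-implementation of the reworked args handling in setTableData (hypothetical helper, plain strings instead of the B.*/D.* constants): from the args column onward, and for any trailing columns beyond the header, cells are split as key:value pairs into a single dict instead of being stored as plain row fields.

def parse_step_row(header, fields):
    # fields[0] is the comp prefix column; data cells start at index 1
    row, args = {}, {}
    for i, name in enumerate(header, start=1):
        if name == "args" or args:
            key, val = fields[i].split(":")      # e.g. "action:import"
            args[key] = val
            row["args"] = args
        else:
            row[name] = fields[i]
    for extra in fields[len(header) + 1:]:       # surplus columns join the args
        if extra and not extra.startswith("#"):
            key, val = extra.split(":")
            args[key] = val
    return row

row = parse_step_row(["_nr", "variant", "data", "program", "comp", "args"],
                     ["", "1", "testa", "person:1", "execute_testcase",
                      "testrest", "action:import", "var:xyz"])
assert row["args"] == {"action": "import", "var": "xyz"}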
