
csv-fcts init 1

refactor

Ulrich, 2 years ago
commit 87bc7b7b7b
Changed files (number of changed lines):
 1. basic/Testserver.py (17)
 2. basic/catalog.py (18)
 3. basic/connection.py (6)
 4. basic/constants.py (3)
 5. basic/message.py (7)
 6. basic/program.py (14)
 7. basic/testexecution.py (6)
 8. basic/user.py (6)
 9. model/component.py (0)
10. model/step.py (0)
11. test/test_28step.py (0)
12. test/test_31filecsv.py (519)
13. test/testtools.py (7)
14. test/unit.py (0)
15. tools/data_const.py (16)
16. tools/file_abstract.py (10)
17. tools/file_tool.py (12)
18. tools/filecsv_fcts.py (295)
19. tools/fileyaml_fcts.py (35)

basic/Testserver.py (17 changed lines)

@@ -1,8 +1,8 @@
 import basic.component
 import basic.constants as B
-import utils.config_tool
-import utils.data_const as D
-import utils.file_tool
+import tools.config_tool
+import tools.data_const as D
+import tools.file_tool

 COMP_NAME = "testserver"
 COMP_TABLES = ["application", "ap_component", "ap_project", "ap_application",
@@ -24,9 +24,14 @@ class Testserver(basic.component.Component):
         for table in COMP_TABLES:
             if table in B.LIST_DB_ATTR:
                 continue
-            ddl = utils.config_tool.getConfig(job, D.DDL_FILENAME, COMP_NAME, table)
-            path = "/home/ulrich/workspace/Datest/temp/DATASTRUCTURE.yml"
-            utils.file_tool.writeFileDict(job.m, job, path, ddl)
+            ddl = tools.config_tool.getConfig(job, D.DDL_FILENAME, COMP_NAME, table)
+            tddl = {}
+            tddl[table] = {}
+            for k in ddl:
+                tddl[table][k] = ddl[k]
+            tddl[D.DATA_ATTR_TBL] = table
+            path = "/home/ulrich/workspace/Datest/temp/"+table+".csv"
+            tools.file_tool.write_file_dict(job.m, job, path, tddl)
             if B.DATA_NODE_TABLES in ddl and table in ddl[B.DATA_NODE_TABLES]:
                 self.conf[B.DATA_NODE_DDL][table] = ddl[B.DATA_NODE_TABLES][table]
             elif table in ddl:
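The rewritten loop above replaces the single DATASTRUCTURE.yml dump: each table's DDL is nested under its table name, tagged with D.DATA_ATTR_TBL ("_table"), and written to its own CSV file. A minimal sketch of the dict that reaches write_file_dict; the column data here is hypothetical, standing in for whatever tools.config_tool.getConfig actually returns:

    # hypothetical DDL, as it might come back from tools.config_tool.getConfig
    ddl = {"polid": {"field": "polid", "format": "int"},
           "polnr": {"field": "polnr", "format": "string"}}
    tddl = {}
    tddl["police"] = dict(ddl)    # nest the column definitions under the table name
    tddl["_table"] = "police"     # D.DATA_ATTR_TBL, picked up again by the csv writer
    # tools.file_tool.write_file_dict(job.m, job, ".../temp/police.csv", tddl)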

basic/catalog.py (18 changed lines)

@@ -7,12 +7,12 @@
 import os
 import basic.program
 import basic.constants as B
-import utils.path_const as P
-import utils.data_const as D
-import utils.config_tool
-import utils.path_tool
-import utils.file_tool
-import utils.tdata_tool
+import tools.path_const as P
+import tools.data_const as D
+import tools.config_tool
+import tools.path_tool
+import tools.file_tool
+# import tools.tdata_tool

 EXP_KEY_MISSING = "key is missing {}"
 EXP_KEY_DOESNT_EXIST = "key doesnt exist in domain {}"
@@ -90,13 +90,13 @@ class Catalog:
             raise Exception(EXP_KEY_MISSING, (domain))
         if domain in self.catalog:
             return
-        pathname = utils.config_tool.getConfigPath(job, P.KEY_CATALOG, domain)
+        pathname = tools.config_tool.getConfigPath(job, P.KEY_CATALOG, domain)
         if pathname is None:
             raise Exception(EXP_KEY_MISSING, (domain))
         if pathname[-4:] == ".csv":
-            data = utils.tdata_tool.getCsvSpec(job.m, job, pathname, D.CSV_SPECTYPE_KEYS)
+            data = tools.tdata_tool.getCsvSpec(job.m, job, pathname, D.CSV_SPECTYPE_KEYS)
         else:
-            data = utils.file_tool.readFileDict(job, pathname, job.m)
+            data = tools.file_tool.readFileDict(job, pathname, job.m)
         self.catalog[domain] = data[B.DATA_NODE_TABLES][domain][B.DATA_NODE_KEYS]
         return data

basic/connection.py (6 changed lines)

@@ -7,9 +7,9 @@
 import basic.toolHandling
 import utils.data_const as D
 import basic.constants as B
-import basic.entity
+import model.entity

-class Connection(basic.entity.Entity):
+class Connection(model.entity.Entity):
     name = ""
     description = ""
     application = ""
@@ -26,7 +26,7 @@ class Connection(basic.entity.Entity):
         self.job = job

-    def getSchema(self):
+    def get_schema(self):
         dbtype = self.job.conf[B.TOPIC_NODE_DB][B.ATTR_TYPE]
         dbi = basic.toolHandling.getDbTool(self.job, None, dbtype)
         print(str(dbi))

basic/constants.py (3 changed lines)

@@ -97,6 +97,8 @@ CONF_NODE_GENERAL = "_general"
 """ This constant defines a subnode of a table for the column-names """
 DATA_NODE_HEADER = "_header"
 """ This constant defines a subnode of a table for the column-names """
+DATA_NODE_FIELDS = "_fields"
+""" This constant defines a subnode of a table for the column-names """
 DATA_NODE_DATA = "_data"
 """ This constant defines a subnode of a table for the data which are set as key-value-pair with the column-names as key """
 DATA_NODE_STEPS = "_steps"
@@ -157,6 +159,7 @@ ATTR_FILE_OLD = "oldfile"
ATTR_FILE_ROTATE = "rotate"
NODE_ATTRIBUTES = "attributes"
LIST_FILE_ATTR = [ATTR_FILE_OLD, ATTR_FILE_ROTATE] + LIST_ARTS_ATTR
LIST_ATTR = {
TOPIC_NODE_DB: LIST_DB_ATTR,

basic/message.py (7 changed lines)

@@ -67,6 +67,9 @@ class TempMessage:
         self.debugpath = os.path.join(path, "debug_"+logTime+".txt")
         self.debugfile = open(self.debugpath, "w")

+    def getLogLevel(self, tool="", comp=None):
+        return 0
+
     def logFatal(self, text):
         self.debug(LIMIT_FATAL, "FATAL: " + text)
@@ -118,7 +121,7 @@
     Output is priority-controlled, based on
     * type (fatal..trace)
     * setting (a) via parameter OR (b) in the function
-    The setting is made in the function header, e.g. verify=job.getDebugLevel (possibly no debug output) or verify=job.getDebugLevel-1 (debug output switched on)
+    The setting is made in the function header, e.g. verify=job.getDebugLevel (possibly no debug output) or verify=job.getMessageLevel-1 (debug output switched on)
     "fatal": "3", # abort error, always written to debug and log, sets RC
     "error": "2", # error, always written to debug and log, sets RC
     "warn": "1", # warning, always written to debug and log, sets RC
@@ -239,7 +242,7 @@
             rcId = returnCode
         else:
             rcId = ( LIST_MTEXT.index(returnCode.lower()) - 3 ) * (-1)
-        if self.rc <= int(rcId) or rcId < 0:
+        if self.rc >= int(rcId) or rcId < 0:
             return True
         else:
             return False
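The flipped comparison (self.rc >= int(rcId) instead of <=) decides whether a textual return code counts as reached. A sketch of the text-to-number mapping it relies on; only the first three entries of LIST_MTEXT are confirmed by the fatal/error/warn docstring above, the rest of the list is an assumption:

    LIST_MTEXT = ["fatal", "error", "warn"]   # assumed prefix of the real list

    def rc_id(returnCode):
        # "fatal" -> 3, "error" -> 2, "warn" -> 1, matching the docstring values
        return (LIST_MTEXT.index(returnCode.lower()) - 3) * (-1)

    assert rc_id("fatal") == 3 and rc_id("error") == 2 and rc_id("warn") == 1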

basic/program.py (14 changed lines)

@@ -54,9 +54,12 @@ class SimpleJob:
         path = tools.path_tool.getBasisConfigPath()
         self.conf = getConfiguration(self, path)
         self.jobid = str(100000)
-        catalog = model.catalog.Catalog.getInstance()
-        self.programDef = catalog.getValue(self, CTLG_NAME, program, "")
+        try:
+            catalog = model.catalog.Catalog.getInstance()
+            self.programDef = catalog.getValue(self, CTLG_NAME, program, "")
+        except:
+            self.m = basic.message.TempMessage(self, "testtime")
+            pass
         if args is not None:
             if "par" in args:
                 self.par = Parameter(self, args["par"])
@@ -140,6 +143,7 @@ class Job:
         path = tools.path_tool.getBasisConfigPath()
         self.conf = getConfiguration(self, path)
         catalog = model.catalog.Catalog.getInstance()
+        print("program "+program)
         self.programDef = catalog.getValue(self, CTLG_NAME, program, "")
         try:
             path = tools.config_tool.select_config_path(self, P.KEY_BASIC, B.BASIS_FILE)
@@ -248,7 +252,7 @@
         cconf = basic.componentHandling.getComponentDict()
         output["par"] = self.par.__dict__
         if len(cconf) < 1:
-            tools.file_tool.writeFileDict(self.m, self, parpath, output)
+            tools.file_tool.write_file_dict(self.m, self, parpath, output)
             return
         output[B.SUBJECT_COMPS] = {}
         for c in cconf:
@@ -259,7 +263,7 @@
                 output[B.SUBJECT_COMPS][c][x] = cconf[c][x]
                 if x == B.SUBJECT_CONN and "passwd" in cconf[c][x]:
                     cconf[B.SUBJECT_COMPS][c][x]["passwd"] = "xxxxx"
-        tools.file_tool.writeFileDict(self.m, self, parpath, output)
+        tools.file_tool.write_file_dict(self.m, self, parpath, output)

     def loadParameter(self):
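The new try/except means a SimpleJob can now be built for a program without a catalog entry: the lookup failure is swallowed and job.m falls back to a TempMessage. A hedged sketch of what that enables in tests, assuming the basis configuration itself loads:

    job = basic.program.SimpleJob("some_uncataloged_program")   # hypothetical program name
    # instead of the constructor raising, job.m is a TempMessage(job, "testtime"),
    # so test code can keep using the logging API
    job.m.logFatal("job is usable even without a catalog entry")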

basic/testexecution.py (6 changed lines)

@@ -7,9 +7,9 @@
 import basic.toolHandling
 import utils.data_const as D
 import basic.constants as B
-import basic.entity
+import model.entity

-class Testexecution(basic.entity.Entity):
+class Testexecution(model.entity.Entity):
     name = ""
     description = "" # from testplan, testsuite, testcase
     release = ""
@@ -24,7 +24,7 @@ class Testexecution(basic.entity.Entity):
         """
         self.job = job

-    def getSchema(self):
+    def get_schema(self):
         dbtype = self.job.conf[B.TOPIC_NODE_DB][B.ATTR_TYPE]
         dbi = basic.toolHandling.getDbTool(self.job, None, dbtype)
         sql = dbi.getCreateTable("testexecution")

basic/user.py (6 changed lines)

@@ -7,7 +7,7 @@
 import basic.toolHandling
 import utils.data_const as D
 import basic.constants as B
-import basic.entity
+import model.entity

 ddl = {
     "user": {
@@ -23,7 +23,7 @@ ddl = {
     }
 }

-class User(basic.entity.Entity):
+class User(model.entity.Entity):
     username = ""
     password = ""
@@ -38,7 +38,7 @@ class User(basic.entity.Entity):
         self.conf[B.DATA_NODE_DDL] = self.getDdl(job, ddl)
         self.m = job.m

-    def getSchema(self):
+    def get_schema(self):
         dbtype = self.job.conf[B.TOPIC_NODE_DB][B.ATTR_TYPE]
         dbi = basic.toolHandling.getDbTool(self.job, None, dbtype)
         sql = dbi.getCreateTable("user")

model/component.py (0 changed lines)

model/step.py (0 changed lines)

test/test_28step.py (0 changed lines)

test/test_31filecsv.py (519 changed lines)

@@ -0,0 +1,519 @@
import unittest
import inspect
import tools.filecsv_fcts
import basic.constants as B
import basic.toolHandling as toolHandling
import tools.data_const as D
import tools.path_const as P
import tools.config_tool
import test.testtools
import test.constants
import basic.program
import tools.path_tool
import tools.file_tool
import os
HOME_PATH = test.constants.HOME_PATH
DATA_PATH = test.constants.DATA_PATH
OS_SYSTEM = test.constants.OS_SYSTEM
"""
a) catalog: key(s) - values # meta-spec, meta-auto
b) head: key - value # spec-info
c) option: key - value # spec -> job.par
d) step: key=function - values # spec (tp, ts) -> comp.function
e) step: key=usecase - values # spec (tc) -> comp.steps
f) ddl-table: key=field - values=attributes # meta-spec, comp
g) data-table: array: field - values # spec.data, comp.artifacts
"""
# the list of TEST_FUNCTIONS defines which function will be really tested.
# if you minimize the list you can check the specific test-function
TEST_FUNCTIONS = ["test_11ddl", "test_12catalog",
"test_02getCsvSpec_data", "test_03getCsvSpec_tree", "test_04getCsvSpec_key",
"test_05getCsvSpec_conf", "test_06parseCsv"]
TEST_FUNCTIONS = ["test_12catalog"]
TEST_FUNCTIONS = ["test_11ddl"]
PROGRAM_NAME = "clean_workspace"
# with this variable you can switch prints on and off
verbose = False
class MyTestCase(unittest.TestCase):
mymsg = "--------------------------------------------------------------"
def test_11ddl(self):
global mymsg
context = D.CSV_SPECTYPE_DDL
actfunction = str(inspect.currentframe().f_code.co_name)
cnttest = 0
if actfunction not in TEST_FUNCTIONS:
return
# job = basic.program.SimpleJob(PROGRAM_NAME)
job = test.testtools.getWorkspaceJob(PROGRAM_NAME)
f = toolHandling.getFileTool(job, None, "csv")
fy = toolHandling.getFileTool(job, None, "yaml")
csvText = "_type;"+context+";;;;;\n"
csvText += "table:police;_field;comment;format;acceptance;generate;nullable\n"
csvText += ";polid;;int;ignore;auto-id;n\n"
csvText += ";polnr;;string;;auto-id;n\n"
csvText += ";polvers;;int;;build-id;n\n"
csvText += ";persid;;int;;ref-person;n\n"
csvText += ";objid;;int;;ref-object;n\n"
csvText += ";amount;;dec;;range;n\n"
data = f.parseCsv(job.m, job, csvText.split("\n"), ttype="")
self.assertIn("_type", data)
self.assertIn("police", data)
self.assertIn("polid", data["police"])
self.assertIn("format", data["police"]["polid"])
self.assertIn("int", data["police"]["objid"]["format"])
csvText = "table:police;_field;comment;format;acceptance;generate;nullable\n"
csvText += ";polid;;int;ignore;auto-id;n\n"
csvText += ";polnr;;string;;auto-id;n\n"
csvText += ";polvers;;int;;build-id;n\n"
csvText += ";persid;;int;;ref-person;n\n"
csvText += ";objid;;int;;ref-object;n\n"
csvText += ";amount;;dec;;range;n\n"
data = f.parseCsv(job.m, job, csvText.split("\n"), ttype=context)
self.assertIn("_type", data)
self.assertIn("police", data)
self.assertIn("polid", data["police"])
self.assertIn("format", data["police"]["polid"])
self.assertIn("int", data["police"]["objid"]["format"])
text = fy.dump_file(data)
#print(str(data))
#print(text)
job.m.logInfo(csvText)
job.m.logInfo("----------------------------------------")
result = f.buildCsv(job.m, job, data, ttype="")
self.assertRegex(result, r"_type;"+context)
result = f.buildCsv(job.m, job, data, ttype=D.CSV_SPECTYPE_DDL)
self.assertNotIn("_type", result)
job.m.logInfo(result)
job.m.logInfo("----------------------------------------")
job.m.logInfo(text)
def test_12catalog(self):
global mymsg
context = D.CSV_SPECTYPE_CTLG
actfunction = str(inspect.currentframe().f_code.co_name)
cnttest = 0
if actfunction not in TEST_FUNCTIONS:
return
job = basic.program.SimpleJob(PROGRAM_NAME)
f = toolHandling.getFileTool(job, None, "csv")
fy = toolHandling.getFileTool(job, None, "yaml")
csvText = "_type;ctlg;;;;;;;;;;;;;\n"
csvText += "_key;name;;;;;;;;;;;;;\n"
csvText += "table:programs;name;objtype;objname;time;env;app;variant;pardef;pfilesource;pfiletarget;dirname;basedir;loglevel;logpath\n"
csvText += ";test_executer;tp,ts,tc;m;m;m;m;o;\"{\"gran\": \"args\", \"application\": \"args\", \"environment\": \"args\", \"testelem\": \"args\", \"variant\": \"args\"}\";;;{objtype}dir;{objtype}base;info;{job.par.wsdir}/{log}/log_{job.start}.txt\n"
csvText += ";init_testsuite;ts;m;o;m;m;o;\"{\"gran\": \"testsuite\", \"application\": \"args\", \"environment\": \"args\", \"testsuite\": \"args\", \"variant\": \"args\"}\";envparfile;tsparfile;tsdir;tsbase;info;{job.par.tsdir}/{log}/{job.program}_{job.start}.txt\n"
data = f.parseCsv(job.m, job, csvText.split("\n"), ttype="")
"""self.assertIn("_type", data)
self.assertIn("programs", data)
self.assertIn("polid", data["police"])
self.assertIn("format", data["police"]["polid"])
self.assertIn("int", data["police"]["objid"]["format"])
"""
text = fy.dump_file(data)
#print(str(data))
#print(text)
logPath = os.path.join("/home/ulrich/workspace/testprojekt/temp/log_test.txt")
logger = open(logPath, "w")
logger.write(csvText)
job.m.logInfo(csvText)
job.m.logInfo("----------------------------------------")
logger.write("----------------------------------------\n")
#self.assertRegex(result, r"_type;"+context)
result = f.buildCsv(None, job, data, ttype=context)
#self.assertNotIn("_type", result)
logger.write(result)
logger.write("----------------------------------------\n")
logger.write(text)
#job.m.logInfo(result)
#job.m.logInfo("----------------------------------------")
#job.m.logInfo(text)
logger.close()
self.assertEqual(csvText, result)
def test_01tdata(self):
global mymsg
actfunction = str(inspect.currentframe().f_code.co_name)
cnttest = 0
if actfunction not in TEST_FUNCTIONS:
return
job = basic.program.SimpleJob(PROGRAM_NAME)
setattr(job.par, "tdtyp", "dir")
setattr(job.par, "tdsrc", "TC0001")
setattr(job.par, "tdname", "testspec")
filename = str(job.conf["paths"]["testdata"]) + "/" + getattr(job.par, "tdsrc") + "/" + getattr(job.par, "tdname") + ".csv"
#tdata = f.readCsv(job.m, filename, None)
#self.assertEqual(len(tdata["testa1"]), 3)
setattr(job.par, "tdtyp", "dir")
setattr(job.par, "tdsrc", "TST001")
#tdata = f.getTestdata()
#self.assertEqual(("steps" in tdata), True)
MyTestCase.mymsg += "\n----- "+actfunction+" : "+str(cnttest)
def test_02isBlock(self):
global mymsg
actfunction = str(inspect.currentframe().f_code.co_name)
cnttest = 0
if actfunction not in TEST_FUNCTIONS:
return
job = basic.program.SimpleJob(PROGRAM_NAME)
f = toolHandling.getFileTool(job, None, "csv")
res = f.isBlock(job.m, job, "_type", D.CSV_BLOCK_ATTR, "status")
self.assertEqual(True, res)
res = f.isBlock(job.m, job, "", D.CSV_BLOCK_ATTR, "status")
self.assertEqual(True, res)
res = f.isBlock(job.m, job, "head:name", D.CSV_BLOCK_OPTION, "status")
self.assertEqual(False, res)
res = f.isBlock(job.m, job, "option:name", D.CSV_BLOCK_OPTION, "status")
self.assertEqual(True, res)
res = f.isBlock(job.m, job, ":name", D.CSV_BLOCK_OPTION, "option")
self.assertEqual(True, res)
res = f.isBlock(job.m, job, "table:name", D.CSV_BLOCK_OPTION, "option")
self.assertEqual(False, res)
def test_02getCsvSpec_data(self):
global mymsg
actfunction = str(inspect.currentframe().f_code.co_name)
cnttest = 0
if actfunction not in TEST_FUNCTIONS:
return
job = test.testtools.getJob()
# filename = os.path.join(job.conf["paths"]["testdata"], getattr(job.par, "tdsrc"), getattr(job.par, "tdname") + ".csv")
"""
a) data : like a table with data-array of key-value-pairs
a_0 is keyword [option, step, CSV_HEADER_START ]
a_0 : { a_1 : { f_1 : v_1, .... } # option, step
a_0 : { .. a_n : { _header : [ .. ], _data : [ rows... ] # table, node
"""
tests = ["malformated", "comments", D.CSV_BLOCK_OPTION, D.CSV_BLOCK_STEP, B.DATA_NODE_TABLES]
if "comments" in tests:
specLines = [
";;;;;;",
"#;;;;;;"
]
f = toolHandling.getFileTool(job, None, "csv")
tdata = f.parseCsv(job.m, job, specLines, D.CSV_SPECTYPE_DATA)
self.assertEqual(0, len(tdata))
cnttest += 1
if "malformated" in tests:
malformat = "option;arg;;;;;"
specLines = [
"option:par;arg;;;;;",
malformat,
"#option:nopar;arg;;;;;",
"#;;;;;;"
]
self.assertRaises(Exception, f.parseCsv, (job.m, job, specLines, D.CSV_SPECTYPE_DATA))
cnttest += 1
malformat = "step;component;1;arg:val;;;;;"
specLines = [
"step:1;component;1;arg:val;;;",
malformat
]
# TODO sorting not evaluated
# self.assertRaises(D.EXCP_MALFORMAT+malformat, f.parseCsvSpec, (job.m, specLines, D.CSV_SPECTYPE_DATA))
malformat = "step:2;component;1;arg;;;;;"
specLines = [
"step:1;component;1;arg:val;;;",
malformat
]
self.assertRaises(Exception, f.parseCsv, (job.m, job, specLines, D.CSV_SPECTYPE_DATA))
cnttest += 1
specLines = [
"option:par;arg;;;;;",
"#option:nopar;arg;;;;;",
"#;;;;;;"
]
if D.CSV_BLOCK_OPTION in tests:
specLines = [
"option:description;something;;;;;",
"#;;;;;;"
]
tdata = f.parseCsvSpec(job.m, specLines, D.CSV_SPECTYPE_DATA, {}, job)
self.assertEqual(1, len(tdata))
print(tdata)
self.assertIn(D.CSV_BLOCK_OPTION, tdata)
cnttest += 2
if D.CSV_BLOCK_STEP in tests:
specLines = [
"step:1;testa;1;1;table:_lofts,action:import;;;;;",
"#;;;;;;"
]
tdata = f.parseCsvSpec(job.m, specLines, D.CSV_SPECTYPE_DATA, {}, job)
print(tdata)
self.assertEqual(1, len(tdata))
self.assertIn(B.DATA_NODE_STEPS, tdata)
self.assertIsInstance(tdata[B.DATA_NODE_STEPS], list)
cnttest += 3
for step in tdata[B.DATA_NODE_STEPS]:
print(step)
self.assertEqual(hasattr(step, B.DATA_NODE_COMP), True)
# self.assertEqual(hasattr(step, B.ATTR_DATA_REF), True)
self.assertEqual(hasattr(step, B.ATTR_STEP_ARGS), True)
cnttest += 3
specLines = [
"step:1;testa;1;1;table:_lofts;action:export;;;;;",
"#;;;;;;"
]
tdata = {}
tdata = f.parseCsvSpec(job.m, specLines, D.CSV_SPECTYPE_DATA, {}, job)
print(tdata)
self.assertEqual(1, len(tdata))
self.assertIn(B.DATA_NODE_STEPS, tdata)
self.assertIsInstance(tdata[B.DATA_NODE_STEPS], list)
self.assertEqual(2, len(tdata[B.DATA_NODE_STEPS][0].args))
cnttest += 3
if B.DATA_NODE_TABLES in tests:
specLines = [
"table:testa:lofts;_nr;street;city;zip;state;beds;baths;sqft;type;price;latitude;longitude",
"testa:lofts;1;stra;town;12345;usa;4;1;50;house;111;45;8",
"#;;;;;;"
]
tdata = f.parseCsvSpec(job.m, specLines, B.DATA_NODE_TABLES, {}, job)
print(tdata)
self.assertEqual(1, len(tdata))
self.assertIn(B.DATA_NODE_TABLES, tdata)
self.assertIsInstance(tdata[B.DATA_NODE_TABLES], dict)
cnttest += 3
for k in tdata[B.DATA_NODE_TABLES]["testa"]:
table = tdata[B.DATA_NODE_TABLES]["testa"][k]
self.assertIn(B.DATA_NODE_HEADER, table)
self.assertIn(B.DATA_NODE_DATA, table)
cnttest += 2
if B.DATA_NODE_TABLES in tests:
specLines = [
"option:description;create 2 new contracts;;;;",
"# ;;;;;",
"# ;component;exec;_nr;action;args;;",
"step:1;testrest;2;1;function:xml-rest;action:new;;",
"step:2;testrest;3;1,2;function:json-rest;action:new;;",
"# ;;;;;",
"# testdate only here specified;expect:row 2 is inserted as precond;;;;",
"_date;01.07.2022;;;;",
"table:person;_nr;famname;name;birth;sex",
"testrest:person;1;Brecht;Bert;10.02.98;m",
"testrest:person,testcrmdb:person;2;Leon;Donna;28.09.42;f"
]
tdata = f.parseCsvSpec(job.m, specLines, D.CSV_SPECTYPE_DATA, {}, job)
print(tdata)
MyTestCase.mymsg += "\n----- "+actfunction+" : "+str(cnttest)
def test_03getCsvSpec_tree(self):
global mymsg
actfunction = str(inspect.currentframe().f_code.co_name)
cnttest = 0
if actfunction not in TEST_FUNCTIONS:
return
job = test.testtools.getJob()
""""
b) tree : as a tree - the rows must be unique identified by the first column
a_0 is keyword in CSV_HEADER_START
a_0 : { .. a_n : { _header : [ fields.. ], _data : { field : value }
"""
specLines = [
"_type;tree;;;;;",
"table:usecae;usecase;executer;nr;subcase;must;args;",
";Meldung_einspielen;Mock;1;Meldung_aufbereiten;must:;;",
";Meldung_einspielen;Mock;2;Meldung_senden;must:;;",
";Meldung_einspielen;Mock;3;Batche_starten_stopen;must:;;",
";Meldung_aufbereiten;Mock;1;Daten_lesen;must:;;",
";Meldung_aufbereiten;Mock;2;Daten_mappen;must:;;",
";Meldung_aufbereiten;Mock;3;Anfrage_schreiben;must:;;",
";Meldung_senden;Mock;1;cli;must:;;",
";Batche_starten_stopen;Mock;1;api;must:;;"
]
MyTestCase.mymsg += "\n----- "+actfunction+" : "+str(cnttest)
def test_04getCsvSpec_key(self):
global mymsg
actfunction = str(inspect.currentframe().f_code.co_name)
cnttest = 0
if actfunction not in TEST_FUNCTIONS:
return
job = test.testtools.getJob()
""""
c) keys : as a tree - the rows must be unique identified by the first column
a_0 is keyword in CSV_HEADER_START
a_1 ... a_n is key characterized by header-field like _fk* or _pk*
a_0 : { .. a_n : { _keys : [ _fpk*.. ] , _header : [ fields.. ], _data : { pk_0 : { ... pk_n : { field : value }
"""
tests = ["malformated", "comments", B.DATA_NODE_TABLES]
if "comments" in tests:
specLines = [
";;;;;;",
"#;;;;;;"
]
f = toolHandling.getFileTool(job, None, "csv")
tdata = f.parseCsv(job.m, specLines, D.CSV_SPECTYPE_CONF, {}, job)
self.assertEqual(0, len(tdata))
cnttest += 1
if "malformated" in tests:
malformat = "table;key;;;;;"
specLines = [
malformat,
"#;;;;;;"
]
self.assertRaises(Exception, f.parseCsv, (job.m, job, specLines, D.CSV_SPECTYPE_KEYS))
cnttest += 1
if B.DATA_NODE_TABLES in tests:
specLines = [
"table:capital;key;val;;;;",
";athens;;;;;",
";berlin;;;;;",
";cairo;;;;;"
]
tdata = f.parseCsv(job.m, job, specLines, D.CSV_SPECTYPE_KEYS)
print(str(tdata))
self.assertEqual(1, len(tdata))
self.assertEqual(1, len(tdata["_tables"]))
self.assertEqual(4, len(tdata["_tables"]["capital"]))
self.assertEqual(3, len(tdata["_tables"]["capital"]["_keys"]))
cnttest += 4
specLines = [
"table:capital;key;val;;;;",
";athens;;;;;",
";berlin;;;;;",
"table:country;key;val;;;;",
";greece;;;;;",
";germany;;;;;"
]
tdata = f.parseCsv(job.m, job, specLines, D.CSV_SPECTYPE_KEYS)
#tdata = f.parseCsvSpec(job.m, specLines, D.CSV_SPECTYPE_TREE)
print(str(tdata))
self.assertEqual(1, len(tdata))
self.assertIn("capital", tdata["_tables"])
self.assertEqual(2, len(tdata["_tables"]))
self.assertEqual(4, len(tdata["_tables"]["country"]))
self.assertEqual(2, len(tdata["_tables"]["country"]["_keys"]))
cnttest += 4
MyTestCase.mymsg += "\n----- "+actfunction+" : "+str(cnttest)
def test_05getCsvSpec_conf(self):
global mymsg
actfunction = str(inspect.currentframe().f_code.co_name)
cnttest = 0
if actfunction not in TEST_FUNCTIONS:
return
job = basic.program.SimpleJob(PROGRAM_NAME)
# job = test.testtools.getJob()
f = toolHandling.getFileTool(job, None, "csv")
""""
d) conf:
_type : conf
_header : [ field_0, ... ]
{ field_0 : { attr_0 : val_0, .. },
field_1 : { ... }, ... }
"""
specLinesA = [
"table:lofts;_field;field;type;acceptance;key",
"lofts;street;a;str;;T:1",
";city;b;str;;F:1",
"#;;;;;;"
]
specLinesB = [
"_type;conf;;;;;;",
"table:lofts;_field;field;type;acceptance;key",
"lofts;street;a;str;;T:1",
";city;b;str;;F:1",
"#;;;;;;"
]
tdata = f.parseCsv(job.m, job, specLinesA, D.CSV_SPECTYPE_CONF)
self.assertEqual(2, len(tdata))
self.assertEqual(D.CSV_SPECTYPE_CONF, tdata[D.DATA_ATTR_TYPE])
self.assertIn("lofts", tdata)
self.assertEqual("_field;field;type;acceptance;key", ";".join(tdata["lofts"][B.DATA_NODE_HEADER]))
tdata = f.parseCsv(job.m, job, specLinesB, "")
print(tdata)
self.assertEqual(2, len(tdata))
self.assertEqual(D.CSV_SPECTYPE_CONF, tdata[D.DATA_ATTR_TYPE])
self.assertNotIn(B.DATA_NODE_TABLES, tdata)
self.assertIn("lofts", tdata)
self.assertEqual("_field;field;type;acceptance;key", ";".join(tdata["lofts"][B.DATA_NODE_HEADER]))
cnttest += 3
table = tdata["lofts"]
self.assertIn(B.DATA_NODE_HEADER, table)
self.assertNotIn(B.DATA_NODE_DATA, table)
cnttest += 2
returnLines = f.buildCsv(job.m, job, tdata, D.CSV_SPECTYPE_CONF)
print("returnLines:")
print(returnLines)
MyTestCase.mymsg += "\n----- "+actfunction+" : "+str(cnttest)
def test_06parseCsv(self):
global mymsg
actfunction = str(inspect.currentframe().f_code.co_name)
cnttest = 0
if actfunction not in TEST_FUNCTIONS:
return
job = test.testtools.getJob()
f = toolHandling.getFileTool(job, None, "csv")
cm = basic.componentHandling.ComponentManager.getInstance(job)
componentName = "testcrmdb"
confs = tools.config_tool.getConfig(job, "comp", componentName)
conns = tools.conn_tool.getConnections(job, componentName)
comp = cm.createInstance(componentName, None, confs, conns, 1)
fileLines = [
"table:person;_nr;famname;name;birth;sex",
"testcrmdb:person;1;Brecht;Bert;10.02.98;m",
"testcrmdb:person;2;Leon;Donna;28.09.42;f",
"#;;;;;;"
]
filename = tools.path_tool.rejoinPath(tools.path_tool.composePath(job, P.P_TCBASE, comp), "t_person.csv")
tdata = f.parseCsv(comp.m, job, filename, fileLines, comp, aliasNode="")
print(str(tdata))
self.assertIn(B.DATA_NODE_TABLES, tdata)
self.assertIn("person", tdata[B.DATA_NODE_TABLES])
self.assertEqual(2, len(tdata[B.DATA_NODE_TABLES]["person"][B.DATA_NODE_DATA]))
cnttest += 3
fileLines = [
"_date;27.06.2022",
"_count;2",
"table:person;_nr;famname;name;birth;sex",
"testcrmdb:person;1;Brecht;Bert;10.02.98;m",
"testcrmdb:person;2;Leon;Donna;28.09.42;f",
"#;;;;;;"
]
tdata = f.parseCsv(comp.m, job, filename, fileLines, comp, aliasNode="")
self.assertIn(B.DATA_NODE_TABLES, tdata)
self.assertIn("person", tdata[B.DATA_NODE_TABLES])
self.assertEqual(2, len(tdata[B.DATA_NODE_TABLES]["person"][B.DATA_NODE_DATA]))
cnttest += 3
filename = tools.path_tool.rejoinPath(tools.path_tool.composePath(job, P.P_TCRESULT, comp), "person.csv")
fileLines = [
"_date;27.06.2022",
"_count;2",
"persid;famname;name;birth;sex",
"1;Brecht;Bert;10.02.98;m",
"2;Leon;Donna;28.09.42;f",
"#;;;;;;"
]
tdata = f.parseCsv(comp.m, job, filename, fileLines, comp, aliasNode="")
self.assertIn(B.DATA_NODE_TABLES, tdata)
self.assertIn("person", tdata[B.DATA_NODE_TABLES])
self.assertEqual(2, len(tdata[B.DATA_NODE_TABLES]["person"][B.DATA_NODE_DATA]))
cnttest += 3
text = ""
for k in tdata[B.DATA_NODE_TABLES]:
print("---------\n"+str(tdata))
text += f.buildCsvData(tdata[B.DATA_NODE_TABLES], k, comp, job)
text += "\n"
print(text)
MyTestCase.mymsg += "\n----- "+actfunction+" : "+str(cnttest)
def test_zzz(self):
print(MyTestCase.mymsg)
if __name__ == '__main__':
unittest.main()
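Worth noting is the TEST_FUNCTIONS idiom used by every test above: each test resolves its own name via inspect and returns early unless that name is listed, and since the list is assigned three times in a row, only the last assignment (["test_11ddl"]) is effective. Condensed, the gate looks like this:

    import inspect

    TEST_FUNCTIONS = ["test_11ddl"]          # the last assignment wins

    def test_12catalog():
        actfunction = str(inspect.currentframe().f_code.co_name)
        if actfunction not in TEST_FUNCTIONS:
            return                           # skipped silently; unittest still reports a pass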

test/testtools.py (7 changed lines)

@@ -46,6 +46,13 @@ conf = {
     }
 }

+def getWorkspaceJob(program):
+    args = {"application": "service", "environment": "Testserver", "project": "TESTPROJ",
+            "step": 2}
+    # "usecase": "TST001", "tstime": "2022-03-17_17-28"}
+    job = basic.program.Job(program, "", args)
+    return job
+
 def getJob(pgran="", papp="", penv="", ptstamp="", pmode=""):
     #job = basic.program.Job.popInstance()
     #if not job is None:

test/unit.py (0 changed lines)

tools/data_const.py (16 changed lines)

@@ -51,8 +51,13 @@ DATA_ATTR_KEY = "_key"
 DATA_ATTR_ALIAS = "_alias"
 DATA_ATTR_IDS = "_ids"
 DATA_ATTR_REF = "_ref"
-LIST_DATA_ATTR = [DATA_ATTR_COUNT, DATA_ATTR_DATE, DATA_ATTR_CHAR, DATA_ATTR_COMP,
-                  DATA_ATTR_REF, DATA_ATTR_IDS, DATA_ATTR_ALIAS, DATA_ATTR_KEY]
+DATA_ATTR_TBL = "_table"
+DATA_ATTR_TYPE = "_type"
+DATA_ATTR_DLIM = "_dlim"
+""" name of the table - it can be overwritten by the environment attribute tablename """
+LIST_DATA_ATTR = [DATA_ATTR_TYPE, DATA_ATTR_COUNT, DATA_ATTR_DATE, DATA_ATTR_CHAR, DATA_ATTR_COMP,
+                  DATA_ATTR_REF, DATA_ATTR_IDS, DATA_ATTR_ALIAS, DATA_ATTR_KEY, DATA_ATTR_TBL, DATA_ATTR_DLIM]
 LIST_ATTR_CONST = ["DATA_ATTR_COUNT", "DATA_ATTR_DATE", "DATA_ATTR_CHAR", "DATA_ATTR_COMP", "DATA_ATTR_ALIAS", "DATA_ATTR_KEY"]
HEAD_ATTR_DESCR = "decription"
@@ -80,14 +85,17 @@ CSV_SPECTYPE_DATA = "data"
 CSV_SPECTYPE_TREE = "tree"
 CSV_SPECTYPE_KEYS = "keys"
 CSV_SPECTYPE_CONF = "conf"
+CSV_SPECTYPE_DDL = "ddl"
+CSV_SPECTYPE_CTLG = "ctlg"
 CSV_NODETYPE_KEYS = "_keys"

+CSV_BLOCK_ATTR = "_attr"
 CSV_BLOCK_HEAD = "_head"
 CSV_BLOCK_OPTION = B.DATA_NODE_OPTION
 CSV_BLOCK_STEP = B.DATA_NODE_STEPS
-CSV_BLOCK_TABLES = B.DATA_NODE_TABLES
+CSV_BLOCK_TABLES = "_table"
 CSV_BLOCK_IMPORT = "_import"
-LIST_CSV_BLOCKS = [CSV_BLOCK_HEAD, CSV_BLOCK_OPTION, CSV_BLOCK_STEP, CSV_BLOCK_TABLES, CSV_BLOCK_IMPORT]
+LIST_CSV_BLOCKS = [CSV_BLOCK_ATTR, CSV_BLOCK_HEAD, CSV_BLOCK_OPTION, CSV_BLOCK_STEP, CSV_BLOCK_TABLES, CSV_BLOCK_IMPORT]
 LIST_BLOCK_CONST = ["CSV_BLOCK_HEAD", "CSV_BLOCK_OPTION", "CSV_BLOCK_STEP", "CSV_BLOCK_TABLES", "CSV_BLOCK_IMPORT"]

 STEP_COMP_I = 1
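With the widened LIST_DATA_ATTR, a spec CSV may now open with _type, _table and _dlim rows before the first table block; the parser in tools/filecsv_fcts.py (below) routes any first-column value from this list through setTableAttribute. A sketch of such a preamble, table name illustrative, delimiter as in the tests:

    csvText  = "_type;ddl;;;;;\n"       # D.DATA_ATTR_TYPE row selects the spec type
    csvText += "_table;police;;;;;\n"   # D.DATA_ATTR_TBL row names the single table
    csvText += "table:police;_field;comment;format;acceptance;generate;nullable\n"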

tools/file_abstract.py (10 changed lines)

@@ -31,7 +31,15 @@ class FileFcts():
             return self.job.m
         return None

-    def loadFile(self, path):
+    def load_file(self, path):
         """
         this function parses the text and translates it to dict
         :param text:
         :return:
         """
+        raise Exception(B.EXCEPT_NOT_IMPLEMENT)
+
+    def dump_file(self, data, path):
+        """
+        this function parses the text and translates it to dict
+        :param text:
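The abstract class now fixes the snake_case pair load_file/dump_file that every concrete handler overrides, with the base versions raising B.EXCEPT_NOT_IMPLEMENT. A hypothetical subclass, only to illustrate the contract (the real csv and yaml handlers follow below):

    import json
    import tools.file_abstract

    class JsonFileFcts(tools.file_abstract.FileFcts):
        def load_file(self, path):
            with open(path, "r", encoding="utf-8") as f:
                return json.load(f)        # path -> dict
        def dump_file(self, data, path):
            with open(path, "w", encoding="utf-8") as f:
                json.dump(data, f)         # dict -> file at path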

tools/file_tool.py (12 changed lines)

@@ -207,7 +207,7 @@ def getModTime(job, filepath):
     return out

-def read_file_dict(job, path, msg):
+def read_file_dict(job, path, msg, ttype=""):
     """
     reads and gets general a dict from any kind of filetyp
     :param path: with extension of filetype
@@ -238,7 +238,7 @@ def read_file_dict(job, path, msg):
     elif D.DFILE_TYPE_CSV in path[-5:]:
         ffcts = basic.toolHandling.getFileTool(job, None, D.DFILE_TYPE_CSV)
         #doc = tools.tdata_tool.getCsvSpec(msg, job, path, D.CSV_SPECTYPE_CONF)
-        doc = ffcts.loadFile(path)
+        doc = ffcts.load_file(path, ttype)
         # tools.tdata_tool.getCsvSpec(msg, job, path, D.CSV_SPECTYPE_CONF)
     return doc
@@ -267,7 +267,7 @@ def write_tile_text(msg, job, path, text, enc="utf-8"):
     file.close()

-def writeFileDict(msg, job, path, dict, enc="utf-8"):
+def write_file_dict(msg, job, path, dict, enc="utf-8"):
     # job = basic.program.Job.getInstance()
     mkPaths(job, path, msg)
     if D.DFILE_TYPE_YML in path[-5:]:
@@ -286,4 +286,8 @@ def writeFileDict(msg, job, path, dict, enc="utf-8"):
         text = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + text
         file.write(text)
         file.close()
+    elif D.DFILE_TYPE_CSV in path[-4:]:
+        print("fileWriter for csv")
+        ffcts = basic.toolHandling.getFileTool(job, None, D.DFILE_TYPE_CSV)
+        #doc = tools.tdata_tool.getCsvSpec(msg, job, path, D.CSV_SPECTYPE_CONF)
+        doc = ffcts.dump_file(dict, path)
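read_file_dict and write_file_dict remain the single entry points and dispatch on the path's extension; with this commit a *.csv path is handed to the csv FileFcts from toolHandling instead of falling through. A usage sketch with hypothetical paths:

    doc = tools.file_tool.read_file_dict(job, "temp/police.csv", job.m, ttype="ddl")
    tools.file_tool.write_file_dict(job.m, job, "temp/police.csv", doc)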

tools/filecsv_fcts.py (295 changed lines)

@@ -11,6 +11,7 @@ import tools.file_abstract
 import basic.constants as B
 import tools.data_const as D
 import tools.file_tool
+from basic import toolHandling

 class FileFcts(tools.file_abstract.FileFcts):
@@ -18,16 +19,19 @@ class FileFcts(tools.file_abstract.FileFcts):
     def __init__(self):
         pass

-    def loadFile(self, path):
+    def load_file(self, path, ttype=""):
         """
         this function parses the text and translates it to dict
         :param text:
         :return:
         """
         lines = tools.file_tool.read_file_lines(self.job, path, self.getMsg())
-        return self.parseCsv(self.getMsg(), self.job, lines)
+        return self.parseCsv(self.getMsg(), self.job, lines, ttype)

-    def parseCsv(self, msg, job, lines, ttype=""):
+    def dump_file(self, data, path):
+        text = self.buildCsv(self.getMsg(), self.job, data)
+
+    def parseCsv_alt(self, msg, job, lines, ttype=""):
         """
         :param msg:
@@ -42,7 +46,7 @@ class FileFcts(tools.file_abstract.FileFcts):
         tableAttr = {} # table
         tableDict = {} # table
         for l in lines:
-            if verbose: print("lines "+l)
+            if verbose: print(ttype + "--" + status + " lines "+l)
             fields = splitFields(l, D.CSV_DELIMITER, job)
             # check empty line, comment
             if (len(fields) < 1) or (len(l.strip().replace(D.CSV_DELIMITER,"")) < 1):
@@ -56,21 +60,23 @@ class FileFcts(tools.file_abstract.FileFcts):
                 tableAttr = setTableAttribute(tableAttr, a[0], fields[1], job)
                 if a[0].lower() in D.LIST_DATA_ATTR:
                     status = "TABLE_ALIAS"
+                    if a[0].lower() == D.DATA_ATTR_TYPE:
+                        ttype = fields[1]
                     if a[0].lower() == D.DATA_ATTR_KEY:
                         ttype = D.CSV_SPECTYPE_KEYS
                 continue
             if (a[0].lower() in [D.CSV_BLOCK_HEAD]):
-                if verbose: print("head "+l)
+                if verbose: print(">> head "+l)
                 setTdataLine(tdata, fields, D.CSV_BLOCK_HEAD, job)
                 status = "start"
                 continue
             elif (a[0].lower() == D.CSV_BLOCK_OPTION):
-                if verbose: print("option " + l)
+                if verbose: print(">> option " + l)
                 setTdataLine(tdata, fields, D.CSV_BLOCK_OPTION, job)
                 status = "start"
                 continue
             elif (a[0].lower() == D.CSV_BLOCK_STEP):
-                if verbose: print("step "+l)
+                if verbose: print(">> step "+l)
                 step = basic.step.parseStep(job, fields)
                 if D.CSV_BLOCK_STEP not in tdata:
                     tdata[D.CSV_BLOCK_STEP] = []
@@ -78,14 +84,14 @@ class FileFcts(tools.file_abstract.FileFcts):
                 status = "start"
                 continue
             elif (a[0].lower() == D.CSV_BLOCK_IMPORT):
-                if verbose: print("includes " + l)
+                if verbose: print(">> includes " + l)
                 if D.CSV_BLOCK_IMPORT not in tdata:
                     tdata[D.CSV_BLOCK_IMPORT] = []
                 tdata[D.CSV_BLOCK_IMPORT].append(fields[1])
                 status = "start"
                 continue
             elif (a[0].lower() == D.CSV_BLOCK_TABLES) or (a[0].lower() in D.CSV_HEADER_START):
-                if verbose: print("tables "+l)
+                if verbose: print(">> tables "+l)
                 h = a
                 h[0] = B.DATA_NODE_TABLES
                 if ttype == D.CSV_SPECTYPE_CONF:
@@ -95,7 +101,7 @@ class FileFcts(tools.file_abstract.FileFcts):
                 status = D.CSV_SPECTYPE_DATA
             elif (status == D.CSV_SPECTYPE_DATA):
                 tableDict = getTdataContent(msg, tdata, h)
-                if verbose: print("setTableData "+str(h)+" "+str(tableDict))
+                if verbose: print(">> setTableData "+str(h)+" "+str(tableDict))
                 setTableData(tableDict, fields, ttype, job)
             elif (status == "TABLE_ALIAS") and D.DATA_ATTR_ALIAS in tdata:
                 alias = tdata[D.DATA_ATTR_ALIAS]
@@ -128,6 +134,268 @@ class FileFcts(tools.file_abstract.FileFcts):
tdata[B.DATA_NODE_TABLES].pop(B.DATA_NODE_TABLES)
return tdata
def isEmptyLine(self, msg, job, line, fields):
if (len(fields) < 1) or (len(line.strip().replace(D.CSV_DELIMITER, "")) < 1):
status = "start"
return True
if (fields[0][0:1] == "#"):
return True
return False
def isBlock(self, msg, job, field, block, status):
"""
detects the block either on keywords in the field which opens a block
or on status if there is no keyword in the field
:param msg: message-object maybe from component
:param job: job-object with parameter and configuration
:param field: field in the csv-file
:param block:
:param status:
:return:
"""
try:
blockPur = block.replace("_", "")
a = field.split(":")
if a[0] == blockPur:
return True
elif "_"+a[0] in [D.CSV_BLOCK_OPTION, D.CSV_BLOCK_HEAD, D.CSV_BLOCK_STEP, D.CSV_BLOCK_TABLES]:
return False
if blockPur == status:
return True
if block == D.CSV_BLOCK_ATTR and len(a) == 1 and field[0:1] == "_":
return True
return False
except:
print("isBlock "+field + "=?" + block)
def parseCsv(self, msg, job, lines, ttype=""):
"""
:param msg:
:param job:
:param lines:
:param ttype: content
a) catalog: key(s) - values # meta-spec, meta-auto
b) head: key - value # spec-info
c) option: key - value # spec -> job.par
d) step: key=function - values # spec (tp, ts) -> comp.function
e) step: key=usecase - values # spec (tc) -> comp.steps
f) ddl-table: key=field - values=attributes # meta-spec, comp
g) data-table: array: field - values # spec.data, comp.artifacts
:return:
"""
tdata = {}
status = "start"
verbose = False
tableAttr = {} # table
tableDict = {} # table
# parse the lines
for l in lines:
fields = splitFields(l, D.CSV_DELIMITER, job)
if self.isEmptyLine(msg, job, l, fields): continue
a = fields[0].lower().split(":")
# keywords option, step, table
if self.isBlock(msg, job, fields[0], D.CSV_BLOCK_ATTR, status): # a[0].lower() in D.LIST_DATA_ATTR:
tableAttr = setTableAttribute(tableAttr, a[0], fields[1], job)
if ttype == "" and D.DATA_ATTR_TYPE in tableAttr:
ttype = tableAttr[D.DATA_ATTR_TYPE]
continue
elif self.isBlock(msg, job, fields[0], D.CSV_BLOCK_HEAD, status): # (a[0].lower() == D.CSV_BLOCK_OPTION):
setTdataLine(tdata, fields, D.CSV_BLOCK_HEAD, job)
status = "start"
continue
elif self.isBlock(msg, job, fields[0], D.CSV_BLOCK_OPTION, status): # (a[0].lower() == D.CSV_BLOCK_OPTION):
setTdataLine(tdata, fields, D.CSV_BLOCK_OPTION, job)
status = "start"
continue
elif self.isBlock(msg, job, fields[0], D.CSV_BLOCK_STEP, status): # (a[0].lower() == D.CSV_BLOCK_OPTION):
if verbose: print(">> step "+l)
step = basic.step.parseStep(job, fields)
if D.CSV_BLOCK_STEP not in tdata:
tdata[D.CSV_BLOCK_STEP] = []
tdata[D.CSV_BLOCK_STEP].append(step)
status = "step"
continue
elif self.isBlock(msg, job, fields[0], D.CSV_BLOCK_TABLES, status):
if verbose: print(">> tables " + l)
h = a
h[0] = B.DATA_NODE_TABLES
if ttype == D.CSV_SPECTYPE_CONF:
del h[0]
tableDict = getTdataContent(msg, tdata, h)
setTableHeader(tableDict, tableAttr, fields, ttype, job)
status = D.CSV_SPECTYPE_DATA
elif (status == D.CSV_SPECTYPE_DATA):
tableDict = getTdataContent(msg, tdata, h)
if verbose: print(">> setTableData " + str(h) + " " + str(tableDict))
setTableData(tableDict, fields, ttype, job)
if D.DATA_ATTR_TYPE not in tableAttr:
tableAttr[D.DATA_ATTR_TYPE] = ttype
if ttype in [D.CSV_SPECTYPE_DDL, D.CSV_SPECTYPE_CTLG]:
if len(tdata[B.DATA_NODE_TABLES]) > 1:
job.m.setError("More than one table in "+ttype)
elif len(tdata[B.DATA_NODE_TABLES]) == 0:
job.m.setError("No table in "+ttype)
tdata = {}
else:
data = {}
for k in tdata[B.DATA_NODE_TABLES]:
data[k] = tdata[B.DATA_NODE_TABLES][k]
tdata = data
for k in tableAttr:
tdata[k] = tableAttr[k]
if ttype == D.CSV_SPECTYPE_CONF:
fields = []
print(str(tdata))
for k in tdata:
print("k "+k)
if k in ["_hit"] + D.LIST_DATA_ATTR:
continue
print("k "+k)
if B.DATA_NODE_DATA in tdata[k]:
tdata[k].pop(B.DATA_NODE_DATA)
for f in tdata[k]:
if f in [B.DATA_NODE_HEADER, "_hit"] + D.LIST_DATA_ATTR:
continue
fields.append(f)
tdata[k][B.DATA_NODE_FIELDS] = fields
header = []
if B.DATA_NODE_TABLES in tdata and B.DATA_NODE_TABLES in tdata[B.DATA_NODE_TABLES]:
for k in tdata[B.DATA_NODE_TABLES][B.DATA_NODE_TABLES]:
if k in tdata[B.DATA_NODE_TABLES]:
if verbose: print("Error")
else:
tdata[B.DATA_NODE_TABLES][k] = tdata[B.DATA_NODE_TABLES][B.DATA_NODE_TABLES][k]
tdata[B.DATA_NODE_TABLES].pop(B.DATA_NODE_TABLES)
if "_hit" in tdata:
tdata.pop("_hit")
return tdata
def buildCsv(self, msg, job, data, ttype=""):
""""
d) conf:
_type : conf
_header : [ field_0, ... ]
{ field_0 : { attr_0 : val_0, .. },
field_1 : { ... }, ... }
-->
"_type;conf;;;;;;",
"table:lofts;_field;field;type;acceptance;key",
"lofts;street;a;str;;T:1",
";city;b;str;;F:1",
"#;;;;;;"
"""
out = ""
fields = []
table = ""
header = []
lines = []
tableData = {}
delimiter = D.CSV_DELIMITER
if D.DATA_ATTR_DLIM in data:
delimiter = data[D.DATA_ATTR_DLIM]
if D.DATA_ATTR_TYPE not in data and ttype != "":
data[D.DATA_ATTR_TYPE] = ttype
for f in D.LIST_DATA_ATTR:
if f in data and f == D.DATA_ATTR_TBL:
line = f + delimiter + data[f] + D.CSV_DELIMITER
lines.append(line)
elif ttype != "" and data[D.DATA_ATTR_TYPE] in [D.CSV_SPECTYPE_DDL]:
continue
elif f in data:
out += f + D.CSV_DELIMITER + data[f] + "\n"
if data[D.DATA_ATTR_TYPE] == D.CSV_SPECTYPE_CTLG:
for k in data:
if k in D.LIST_DATA_ATTR:
continue
if k in [B.DATA_NODE_TABLES, B.DATA_NODE_HEADER, "_hit"]:
continue
out += buildHeader(job, data[k][B.DATA_NODE_HEADER], k)
out += buildCtlg(job, data[k][B.DATA_NODE_HEADER], data[k][B.DATA_NODE_KEYS])
elif data[D.DATA_ATTR_TYPE] == D.CSV_SPECTYPE_DDL:
for k in data:
if k in D.LIST_DATA_ATTR:
continue
out += buildHeader(job, data[k][B.DATA_NODE_HEADER], k)
out += buildCtlg(job, data[k][B.DATA_NODE_HEADER], data[k])
if len(out) > 0:
return out
if B.DATA_NODE_TABLES in data:
print("_tables in data")
for k in data[B.DATA_NODE_TABLES].keys():
tableData[k] = data[B.DATA_NODE_TABLES][k]
else:
for k in data.keys():
if k in D.LIST_DATA_ATTR:
continue
tableData[k] = data[k]
for k in tableData:
fields = []
if B.DATA_NODE_FIELDS in data[k]:
fields = data[k][B.DATA_NODE_FIELDS]
if B.DATA_NODE_HEADER in data[k]:
header = data[k][B.DATA_NODE_HEADER]
line = "table:" + k + D.CSV_DELIMITER + D.CSV_DELIMITER.join(header)
lines.append(line)
continue
if B.DATA_NODE_DATA in data[k]:
for row in data[k][B.DATA_NODE_DATA]:
for h in header:
line += D.CSV_DELIMITER + row[h]
lines.append(line)
else:
line = D.CSV_DELIMITER + k
for f in fields:
for h in header:
line += D.CSV_DELIMITER + tableData[f][h]
lines.append(line)
out = "\n".join(lines)
return out
def buildHeader(job, header, tableName):
return "table:" + tableName + ";" + ";".join(header) + "\n"
def buildCtlg(job, header, table):
out = ""
for k in table:
if k in D.LIST_DATA_ATTR:
continue
if k in [B.DATA_NODE_HEADER, B.DATA_NODE_DATA, "_hit"]:
continue
for h in header:
print("k "+k+" h "+h+" typvar "+str(type(table[k][h])))
if isinstance(table[k][h], dict):
text = json.dumps(table[k][h])
out += "\"" + text + "\""
else:
out += D.CSV_DELIMITER + table[k][h]
out += "\n"
return out
def buildDdl(job, header, table):
out = ""
for k in table:
if k in D.LIST_DATA_ATTR:
continue
if k in [B.DATA_NODE_HEADER, B.DATA_NODE_DATA, "_hit"]:
continue
for h in header:
out += D.CSV_DELIMITER + table[k][h]
out += "\n"
return out
def convertRows2Text(job, header, tableData):
text = ""
for f in tableData:
if f in D.LIST_DATA_ATTR:
continue
for h in header:
print(h)
def splitFields(line, delimiter, job):
out = []
fields = line.split(delimiter)
@@ -219,7 +487,7 @@ def setTableHeader(tableDict, tableAttr, fields, ttype, job):
     # preparate the sub-structure for row-data
     if ttype == D.CSV_SPECTYPE_TREE:
         tableDict[B.DATA_NODE_DATA] = {}
-    elif ttype == D.CSV_SPECTYPE_KEYS:
+    elif ttype in [D.CSV_SPECTYPE_KEYS, D.CSV_SPECTYPE_CTLG]:
         tableDict[D.CSV_NODETYPE_KEYS] = {}
         tableDict[D.DATA_ATTR_KEY] = 1
         if D.DATA_ATTR_KEY in tableAttr:
@@ -248,8 +516,9 @@
             row[B.ATTR_DATA_COMP][a[0]] = a[1].strip()
         tableDict[B.DATA_NODE_DATA].append(row)
         tableDict[B.ATTR_DATA_COMP] = tcomps
-    elif ttype == D.CSV_SPECTYPE_KEYS:
+    elif ttype in [D.CSV_SPECTYPE_KEYS, D.CSV_SPECTYPE_CTLG]:
         tableDict[D.CSV_NODETYPE_KEYS][fields[tableDict[D.DATA_ATTR_KEY]].strip()] = row
-    elif ttype == D.CSV_SPECTYPE_CONF:
+    elif ttype in [D.CSV_SPECTYPE_CONF, D.CSV_SPECTYPE_DDL]:
         tableDict[fields[1]] = row
     return tableDict
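Taken together, parseCsv and buildCsv are meant to round-trip a spec: attribute rows set the type, isBlock classifies each first column, and buildCsv re-emits the attribute rows. A sketch of the round trip as test_11ddl above drives it (table and fields illustrative):

    f = toolHandling.getFileTool(job, None, "csv")
    csvText  = "_type;ddl;;;;;\n"
    csvText += "table:police;_field;comment;format;acceptance;generate;nullable\n"
    csvText += ";polid;;int;ignore;auto-id;n\n"
    data = f.parseCsv(job.m, job, csvText.split("\n"), ttype="")
    result = f.buildCsv(job.m, job, data, ttype="")   # expected to start with "_type;ddl" again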

tools/fileyaml_fcts.py (35 changed lines)

@@ -0,0 +1,35 @@
import json
import re
import yaml
import basic.program
import tools.file_abstract
import basic.constants as B
import tools.data_const as D
import tools.file_tool
from basic import toolHandling
class FileFcts(tools.file_abstract.FileFcts):
def __init__(self):
pass
def load_file(self, path):
"""
this function parses the text and translates it to dict
:param text:
:return:
"""
with open(path, 'r', encoding="utf-8") as file:
doc = yaml.full_load(file)
file.close()
return doc
def dump_file(self, data, path=""):
if path == "":
return yaml.dump(data)
with open(path, 'w', encoding="utf-8") as file:
yaml.dump(data, file)
file.close()
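Note that this dump_file doubles as an in-memory serializer: called without a path it returns the YAML text rather than writing a file, which is exactly how the tests above use it (text = fy.dump_file(data)). Minimal sketch, data illustrative:

    fy = FileFcts()
    text = fy.dump_file({"police": {"polid": {"format": "int"}}})   # no path: returns the YAML string
    print(text)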