Browse Source

tests for tdata

master
Ulrich Carmesin 2 years ago
parent
commit
a7fd2f2929
  1. 2
      basic/constants.py
  2. 144
      test/test_tdata.py
  3. 7
      utils/data_const.py
  4. 2
      utils/file_tool.py
  5. 37
      utils/tdata_tool.py

2
basic/constants.py

@@ -70,6 +70,8 @@ DATA_NODE_STEPS = "_steps"
""" This constant defines the main node in the testdata for the steps to execute """ """ This constant defines the main node in the testdata for the steps to execute """
DATA_NODE_OPTION = "_option" DATA_NODE_OPTION = "_option"
""" This constant defines main node in the testdata for testcase specific parameters """ """ This constant defines main node in the testdata for testcase specific parameters """
DATA_NODE_TABLES = "_tables"
""" This constant defines the main node in the testdata for the steps to execute """
DATA_NODE_DDL = "ddl" DATA_NODE_DDL = "ddl"
""" This constant defines the node for data scheme (DataDefinitionLanguage) """ This constant defines the node for data scheme (DataDefinitionLanguage)
The fields are defined in data_const (D) """ The fields are defined in data_const (D) """

144
test/test_tdata.py

@@ -1,9 +1,12 @@
import unittest import unittest
import utils.tdata_tool as t import utils.tdata_tool as t
import basic.constants as B
import utils.data_const as D
import basic.program import basic.program
import os import os
class MyTestCase(unittest.TestCase): class MyTestCase(unittest.TestCase):
def runTest(self): def runTest(self):
self.test_tdata() self.test_tdata()
@@ -22,7 +25,7 @@ class MyTestCase(unittest.TestCase):
tdata = t.getTestdata() tdata = t.getTestdata()
self.assertEqual(("steps" in tdata), True) self.assertEqual(("steps" in tdata), True)
def test_getCsvSpec(self): def test_getCsvSpec_data(self):
job = basic.program.Job("unit") job = basic.program.Job("unit")
tdata = {} tdata = {}
args = {"application": "TEST", "application": "ENV01", "modus": "unit", "loglevel": "debug", args = {"application": "TEST", "application": "ENV01", "modus": "unit", "loglevel": "debug",
@@ -30,9 +33,144 @@ class MyTestCase(unittest.TestCase):
"modus": "unit"} "modus": "unit"}
job.par.setParameterArgs(args) job.par.setParameterArgs(args)
filename = os.path.join(job.conf.confs["paths"]["testdata"], getattr(job.par, "tdsrc"), getattr(job.par, "tdname") + ".csv") filename = os.path.join(job.conf.confs["paths"]["testdata"], getattr(job.par, "tdsrc"), getattr(job.par, "tdname") + ".csv")
tdata = t.getCsvSpec(job.m, filename, "data") """
print("111") a) data : like a table with data-array of key-value-pairs
a_0 is keyword [option, step, CSV_HEADER_START ]
a_0 : { a_1 : { f_1 : v_1, .... } # option, step
a_0 : { .. a_n : { _header : [ .. ], _data : [ rows... ] # table, node
"""
tests = ["malformated", "comments", D.CSV_BLOCK_OPTION, D.CSV_BLOCK_STEP, B.DATA_NODE_TABLES]
if "comments" in tests:
specLines = [
";;;;;;",
"#;;;;;;"
]
tdata = t.parseCsvSpec(job.m, specLines, D.CSV_SPECTYPE_DATA)
self.assertEqual(0, len(tdata))
if "malformated" in tests:
malformat = "option;arg;;;;;"
specLines = [
"option:par;arg;;;;;",
malformat,
"#option:nopar;arg;;;;;",
"#;;;;;;"
]
self.assertRaises(Exception, t.parseCsvSpec, (job.m, specLines, D.CSV_SPECTYPE_DATA))
malformat = "step;component;1;arg:val;;;;;"
specLines = [
"step:1;component;1;arg:val;;;",
malformat
]
# TODO sortierung nicht ausgwertet
# self.assertRaises(D.EXCP_MALFORMAT+malformat, t.parseCsvSpec, (job.m, specLines, D.CSV_SPECTYPE_DATA))
malformat = "step:2;component;1;arg;;;;;"
specLines = [
"step:1;component;1;arg:val;;;",
malformat
]
self.assertRaises(Exception, t.parseCsvSpec, (job.m, specLines, D.CSV_SPECTYPE_DATA))
specLines = [
"option:par;arg;;;;;",
"#option:nopar;arg;;;;;",
"#;;;;;;"
]
if D.CSV_BLOCK_OPTION in tests:
specLines = [
"option:description;something;;;;;",
"#;;;;;;"
]
tdata = t.parseCsvSpec(job.m, specLines, D.CSV_SPECTYPE_DATA)
self.assertEqual(1, len(tdata))
print(tdata)
self.assertIn(D.CSV_BLOCK_OPTION, tdata)
if D.CSV_BLOCK_STEP in tests:
specLines = [
"step:1;testa;1;table:_lofts,action:import;;;;;",
"#;;;;;;"
]
tdata = t.parseCsvSpec(job.m, specLines, D.CSV_SPECTYPE_DATA)
print(tdata)
self.assertEqual(1, len(tdata))
self.assertIn(B.DATA_NODE_STEPS, tdata)
self.assertIsInstance(tdata[B.DATA_NODE_STEPS], list)
for step in tdata[B.DATA_NODE_STEPS]:
print(step)
self.assertIn(B.DATA_NODE_COMP, step)
self.assertIn(B.ATTR_DATA_REF, step)
self.assertIn(B.ATTR_STEP_ARGS, step)
if B.DATA_NODE_TABLES in tests:
specLines = [
"table:testa:lofts;_nr;street;city;zip;state;beds;baths;sqft;type;price;latitude;longitude",
"testa:lofts;1;stra;town;12345;usa;4;1;50;house;111;45;8",
"#;;;;;;"
]
tdata = t.parseCsvSpec(job.m, specLines, B.DATA_NODE_TABLES)
print(tdata)
self.assertEqual(1, len(tdata))
self.assertIn(B.DATA_NODE_TABLES, tdata)
self.assertIsInstance(tdata[B.DATA_NODE_TABLES], dict)
for k in tdata[B.DATA_NODE_TABLES]["testa"]:
table = tdata[B.DATA_NODE_TABLES]["testa"][k]
self.assertIn(B.DATA_NODE_HEADER, table)
self.assertIn(B.DATA_NODE_DATA, table)
def xtest_getCsvSpec_tree(self):
job = basic.program.Job("unit")
tdata = {}
args = {"application": "TEST", "application": "ENV01", "modus": "unit", "loglevel": "debug",
"tdtyp": "csv", "tdsrc": "TC0001", "tdname": "testspec",
"modus": "unit"}
job.par.setParameterArgs(args)
""""
b) tree : as a tree - the rows must be unique identified by the first column
a_0 is keyword in CSV_HEADER_START
a_0 : { .. a_n : { _header : [ fields.. ], _data : { field : value }
"""
def xtest_getCsvSpec_key(self):
job = basic.program.Job("unit")
tdata = {}
args = {"application": "TEST", "application": "ENV01", "modus": "unit", "loglevel": "debug",
"tdtyp": "csv", "tdsrc": "TC0001", "tdname": "testspec",
"modus": "unit"}
job.par.setParameterArgs(args)
""""
c) keys : as a tree - the rows must be unique identified by the first column
a_0 is keyword in CSV_HEADER_START
a_1 ... a_n is key characterized by header-field like _fk* or _pk*
a_0 : { .. a_n : { _keys : [ _fpk*.. ] , _header : [ fields.. ], _data : { pk_0 : { ... pk_n : { field : value }
"""
def xtest_getCsvSpec_conf(self):
job = basic.program.Job("unit")
tdata = {}
args = {"application": "TEST", "application": "ENV01", "modus": "unit", "loglevel": "debug",
"tdtyp": "csv", "tdsrc": "TC0001", "tdname": "testspec",
"modus": "unit"}
job.par.setParameterArgs(args)
""""
d) conf:
_header : [ field_0, ... ]
{ field_0 : { attr_0 : val_0, .. }, field_1 : { ... }, ... }
"""
specLines = [
"table:lofts;_field;field;type;acceptance;key",
"lofts;street;a;str;;T:1",
";city;b;str;;F:1",
"#;;;;;;"
]
tdata = t.parseCsvSpec(job.m, specLines, D.CSV_SPECTYPE_CONF)
print(tdata) print(tdata)
self.assertEqual(1, len(tdata))
self.assertNotIn(B.DATA_NODE_TABLES, tdata)
self.assertIn("lofts", tdata)
table = tdata["lofts"]
self.assertIn(B.DATA_NODE_HEADER, table)
self.assertIn(B.DATA_NODE_DATA, table)
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()

7
utils/data_const.py

@@ -29,12 +29,19 @@ DATA_SRC_CSV = "csv"
CSV_HEADER_START = ["node", "table", "tabelle"] CSV_HEADER_START = ["node", "table", "tabelle"]
CSV_DELIMITER = ";" CSV_DELIMITER = ";"
"""
internal structure of testdata
"""
CSV_SPECTYPE_DATA = "data" CSV_SPECTYPE_DATA = "data"
CSV_SPECTYPE_TREE = "tree" CSV_SPECTYPE_TREE = "tree"
CSV_SPECTYPE_KEYS = "keys" CSV_SPECTYPE_KEYS = "keys"
CSV_SPECTYPE_CONF = "conf" CSV_SPECTYPE_CONF = "conf"
CSV_NODETYPE_KEYS = "_keys" CSV_NODETYPE_KEYS = "_keys"
CSV_BLOCK_OPTION = "option"
CSV_BLOCK_STEP = "step"
EXCP_MALFORMAT = "malformated line: "
ATTR_SRC_TYPE = "tdtyp" ATTR_SRC_TYPE = "tdtyp"
ATTR_SRC_DATA = "tdsrc" ATTR_SRC_DATA = "tdsrc"
ATTR_SRC_NAME = "tdname" ATTR_SRC_NAME = "tdname"

2
utils/file_tool.py

@@ -180,7 +180,7 @@ def readFileDict(path, msg):
doc = json.load(file) doc = json.load(file)
file.close() file.close()
elif D.DFILE_TYPE_CSV in path[-5:]: elif D.DFILE_TYPE_CSV in path[-5:]:
doc = utils.tdata_tool.getCsvSpec(msg, path, "conf") doc = utils.tdata_tool.getCsvSpec(msg, path, D.CSV_SPECTYPE_CONF)
return doc return doc
def writeFileText(msg, path, text, enc="utf-8"): def writeFileText(msg, path, text, enc="utf-8"):

37
utils/tdata_tool.py

@@ -29,6 +29,7 @@ import utils.data_const as D
TOOL_NAME = "tdata_tool" TOOL_NAME = "tdata_tool"
""" name of the tool in order to switch debug-info on """ """ name of the tool in order to switch debug-info on """
TDATA_NODES = [ D.CSV_BLOCK_OPTION ]
def getTdataAttr(): def getTdataAttr():
job = basic.program.Job.getInstance() job = basic.program.Job.getInstance()
@@ -74,7 +75,7 @@ def getTestdata():
data = getCsvSpec(job.m, filename, D.CSV_SPECTYPE_DATA) data = getCsvSpec(job.m, filename, D.CSV_SPECTYPE_DATA)
for k in data: for k in data:
tdata[k] = data[k] tdata[k] = data[k]
if (k == "option"): if (k == D.CSV_BLOCK_OPTION):
for p in data[k]: for p in data[k]:
setattr(job.par, p, data[k][p]) setattr(job.par, p, data[k][p])
else: else:
@@ -100,11 +101,16 @@ def getCsvSpec(msg, filename, type):
_header : [ field_0, ... ] _header : [ field_0, ... ]
{ field_0 : { attr_0 : val_0, .. }, field_1 : { ... }, ... } { field_0 : { attr_0 : val_0, .. }, field_1 : { ... }, ... }
""" """
lines = utils.file_tool.readFileLines(filename, msg)
return parseCsvSpec(msg, lines, type)
def parseCsvSpec(msg, lines, type):
data = {} data = {}
header = [] header = []
h = [] # from a[] h = [] # from a[]
lines = utils.file_tool.readFileLines(filename, msg)
status = "start" status = "start"
tableDict = {} tableDict = {}
for l in lines: for l in lines:
print("lines "+l) print("lines "+l)
@@ -117,9 +123,9 @@ def getCsvSpec(msg, filename, type):
continue continue
a = fields[0].lower().split(":") a = fields[0].lower().split(":")
# keywords option, step, table # keywords option, step, table
if (a[0] not in data): if a[0] not in data and (a[0] in TDATA_NODES):
data[a[0]] = {} data[a[0]] = {}
if (a[0].lower() == "step"): if (a[0].lower() == D.CSV_BLOCK_STEP):
if (not B.DATA_NODE_STEPS in data): if (not B.DATA_NODE_STEPS in data):
data[B.DATA_NODE_STEPS] = [] data[B.DATA_NODE_STEPS] = []
step = {} step = {}
@@ -129,16 +135,22 @@ def getCsvSpec(msg, filename, type):
a = fields[3].split(",") a = fields[3].split(",")
for arg in a: for arg in a:
b = arg.split(":") b = arg.split(":")
step[B.D.ATTR_STEP_ARGS][b[0]] = b[1] if len(b) < 2:
raise Exception(D.EXCP_MALFORMAT + "" + l)
step[B.ATTR_STEP_ARGS][b[0]] = b[1]
data[B.DATA_NODE_STEPS].append(step) data[B.DATA_NODE_STEPS].append(step)
continue continue
elif (a[0].lower() == "option"): elif (a[0].lower() == D.CSV_BLOCK_OPTION):
if len(a) < 2:
raise Exception(D.EXCP_MALFORMAT+""+l)
data[a[0]][a[1]] = fields[1] data[a[0]][a[1]] = fields[1]
continue continue
elif (a[0].lower() in D.CSV_HEADER_START): elif (a[0].lower() in D.CSV_HEADER_START):
# create deep structure a_0 ... a_n # create deep structure a_0 ... a_n
print("tdata 136 CSV_HEADER_START "+str(len(a))) print("tdata 136 CSV_HEADER_START "+str(len(a)))
h = a h = a
data[B.DATA_NODE_TABLES] = {}
h[0] = B.DATA_NODE_TABLES
tableDict = getTabContent(msg, data, h) tableDict = getTabContent(msg, data, h)
i = 0 i = 0
for f in fields: for f in fields:
@@ -149,6 +161,7 @@ def getCsvSpec(msg, filename, type):
break break
header.append(f) header.append(f)
tableDict[B.DATA_NODE_HEADER] = header tableDict[B.DATA_NODE_HEADER] = header
print("tdata 165 header "+str(header))
if type == D.CSV_SPECTYPE_TREE: if type == D.CSV_SPECTYPE_TREE:
tableDict[B.DATA_NODE_DATA] = {} tableDict[B.DATA_NODE_DATA] = {}
elif type == D.CSV_SPECTYPE_KEYS: elif type == D.CSV_SPECTYPE_KEYS:
@@ -183,12 +196,15 @@ def getCsvSpec(msg, filename, type):
setTabContent(msg, data, tableDict, h) setTabContent(msg, data, tableDict, h)
if (status in [D.CSV_SPECTYPE_DATA, D.CSV_SPECTYPE_KEYS]): if (status in [D.CSV_SPECTYPE_DATA, D.CSV_SPECTYPE_KEYS]):
tableDict = getTabContent(msg, data, h) tableDict = getTabContent(msg, data, h)
if type == D.CSV_SPECTYPE_DATA: if type == D.CSV_SPECTYPE_CONF:
tableDict[B.DATA_NODE_HEADER] = headerFields tableDict[B.DATA_NODE_HEADER] = headerFields
setTabContent(msg, data, tableDict, h) setTabContent(msg, data, tableDict, h)
print("return getCsvSpec "+str(data)) if type == D.CSV_SPECTYPE_CONF:
data = data[B.DATA_NODE_TABLES]
print("return getCsvSpec "+str(data))
return data return data
def setTabContent(msg, data, tabledata, path): def setTabContent(msg, data, tabledata, path):
if len(path) >= 2 and path[1] not in data[path[0]]: if len(path) >= 2 and path[1] not in data[path[0]]:
data[path[0]][path[1]] = {} data[path[0]][path[1]] = {}
@@ -203,6 +219,7 @@ def setTabContent(msg, data, tabledata, path):
elif len(path) == 4: elif len(path) == 4:
data[path[0]][path[1]][path[2]][path[3]] = tabledata data[path[0]][path[1]][path[2]][path[3]] = tabledata
def getTabContent(msg, data, path): def getTabContent(msg, data, path):
if len(path) >= 2 and path[1] not in data[path[0]]: if len(path) >= 2 and path[1] not in data[path[0]]:
data[path[0]][path[1]] = {} data[path[0]][path[1]] = {}
@@ -216,6 +233,8 @@ def getTabContent(msg, data, path):
return data[path[0]][path[1]][path[2]] return data[path[0]][path[1]][path[2]]
elif len(path) == 4: elif len(path) == 4:
return data[path[0]][path[1]][path[2]][path[3]] return data[path[0]][path[1]][path[2]][path[3]]
else:
pass
def readCsv(msg, filename, comp, aliasNode=""): def readCsv(msg, filename, comp, aliasNode=""):
job = basic.program.Job.getInstance() job = basic.program.Job.getInstance()

Loading…
Cancel
Save