
tests for tdata

master · Ulrich Carmesin, 3 years ago · parent commit a7fd2f2929
  1. basic/constants.py (2 changed lines)
  2. test/test_tdata.py (144 changed lines)
  3. utils/data_const.py (7 changed lines)
  4. utils/file_tool.py (2 changed lines)
  5. utils/tdata_tool.py (37 changed lines)

basic/constants.py (2 changed lines)

@@ -70,6 +70,8 @@ DATA_NODE_STEPS = "_steps"
""" This constant defines the main node in the testdata for the steps to execute """
DATA_NODE_OPTION = "_option"
""" This constant defines main node in the testdata for testcase specific parameters """
DATA_NODE_TABLES = "_tables"
""" This constant defines the main node in the testdata for the steps to execute """
DATA_NODE_DDL = "ddl"
""" This constant defines the node for data scheme (DataDefinitionLanguage)
The fields are defined in data_const (D) """

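Editor's note: a minimal sketch (node names taken from the constants above; the surrounding values are illustrative assumptions, not part of the commit) of how these constants partition a parsed testdata dict:

import basic.constants as B

tdata = {
    B.DATA_NODE_OPTION: {},   # "_option": testcase-specific parameters
    B.DATA_NODE_STEPS: [],    # "_steps": the steps to execute
    B.DATA_NODE_TABLES: {},   # "_tables": parsed table data per component
}
# B.DATA_NODE_DDL ("ddl") marks a table's data scheme; its fields are defined in utils/data_const.py.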
test/test_tdata.py (144 changed lines)

@@ -1,9 +1,12 @@
import unittest
import utils.tdata_tool as t
import basic.constants as B
import utils.data_const as D
import basic.program
import os

class MyTestCase(unittest.TestCase):
    def runTest(self):
        self.test_tdata()
@@ -22,7 +25,7 @@ class MyTestCase(unittest.TestCase):
        tdata = t.getTestdata()
        self.assertEqual(("steps" in tdata), True)
    def test_getCsvSpec(self):
    def test_getCsvSpec_data(self):
        job = basic.program.Job("unit")
        tdata = {}
        args = {"application": "TEST", "environment": "ENV01", "modus": "unit", "loglevel": "debug",
@@ -30,9 +33,144 @@ class MyTestCase(unittest.TestCase):
"modus": "unit"}
job.par.setParameterArgs(args)
filename = os.path.join(job.conf.confs["paths"]["testdata"], getattr(job.par, "tdsrc"), getattr(job.par, "tdname") + ".csv")
tdata = t.getCsvSpec(job.m, filename, "data")
print("111")
"""
a) data : like a table with data-array of key-value-pairs
a_0 is keyword [option, step, CSV_HEADER_START ]
a_0 : { a_1 : { f_1 : v_1, .... } # option, step
a_0 : { .. a_n : { _header : [ .. ], _data : [ rows... ] # table, node
"""
        tests = ["malformed", "comments", D.CSV_BLOCK_OPTION, D.CSV_BLOCK_STEP, B.DATA_NODE_TABLES]
        if "comments" in tests:
            specLines = [
                ";;;;;;",
                "#;;;;;;"
            ]
            tdata = t.parseCsvSpec(job.m, specLines, D.CSV_SPECTYPE_DATA)
            self.assertEqual(0, len(tdata))
        if "malformed" in tests:
            malformat = "option;arg;;;;;"
            specLines = [
                "option:par;arg;;;;;",
                malformat,
                "#option:nopar;arg;;;;;",
                "#;;;;;;"
            ]
            self.assertRaises(Exception, t.parseCsvSpec, job.m, specLines, D.CSV_SPECTYPE_DATA)
            malformat = "step;component;1;arg:val;;;;;"
            specLines = [
                "step:1;component;1;arg:val;;;",
                malformat
            ]
            # TODO: sorting is not evaluated yet
            # self.assertRaises(D.EXCP_MALFORMAT+malformat, t.parseCsvSpec, (job.m, specLines, D.CSV_SPECTYPE_DATA))
            malformat = "step:2;component;1;arg;;;;;"
            specLines = [
                "step:1;component;1;arg:val;;;",
                malformat
            ]
            self.assertRaises(Exception, t.parseCsvSpec, job.m, specLines, D.CSV_SPECTYPE_DATA)
            specLines = [
                "option:par;arg;;;;;",
                "#option:nopar;arg;;;;;",
                "#;;;;;;"
            ]
        if D.CSV_BLOCK_OPTION in tests:
            specLines = [
                "option:description;something;;;;;",
                "#;;;;;;"
            ]
            tdata = t.parseCsvSpec(job.m, specLines, D.CSV_SPECTYPE_DATA)
            self.assertEqual(1, len(tdata))
            print(tdata)
            self.assertIn(D.CSV_BLOCK_OPTION, tdata)
        if D.CSV_BLOCK_STEP in tests:
            specLines = [
                "step:1;testa;1;table:_lofts,action:import;;;;;",
                "#;;;;;;"
            ]
            tdata = t.parseCsvSpec(job.m, specLines, D.CSV_SPECTYPE_DATA)
            print(tdata)
            self.assertEqual(1, len(tdata))
            self.assertIn(B.DATA_NODE_STEPS, tdata)
            self.assertIsInstance(tdata[B.DATA_NODE_STEPS], list)
            for step in tdata[B.DATA_NODE_STEPS]:
                print(step)
                self.assertIn(B.DATA_NODE_COMP, step)
                self.assertIn(B.ATTR_DATA_REF, step)
                self.assertIn(B.ATTR_STEP_ARGS, step)
        if B.DATA_NODE_TABLES in tests:
            specLines = [
                "table:testa:lofts;_nr;street;city;zip;state;beds;baths;sqft;type;price;latitude;longitude",
                "testa:lofts;1;stra;town;12345;usa;4;1;50;house;111;45;8",
                "#;;;;;;"
            ]
            tdata = t.parseCsvSpec(job.m, specLines, B.DATA_NODE_TABLES)
            print(tdata)
            self.assertEqual(1, len(tdata))
            self.assertIn(B.DATA_NODE_TABLES, tdata)
            self.assertIsInstance(tdata[B.DATA_NODE_TABLES], dict)
            for k in tdata[B.DATA_NODE_TABLES]["testa"]:
                table = tdata[B.DATA_NODE_TABLES]["testa"][k]
                self.assertIn(B.DATA_NODE_HEADER, table)
                self.assertIn(B.DATA_NODE_DATA, table)
    def xtest_getCsvSpec_tree(self):
        job = basic.program.Job("unit")
        tdata = {}
        args = {"application": "TEST", "environment": "ENV01", "modus": "unit", "loglevel": "debug",
                "tdtyp": "csv", "tdsrc": "TC0001", "tdname": "testspec",
                "modus": "unit"}
        job.par.setParameterArgs(args)
        """
        b) tree : as a tree - the rows must be uniquely identified by the first column
        a_0 is keyword in CSV_HEADER_START
        a_0 : { .. a_n : { _header : [ fields.. ], _data : { field : value } } }
        """
    def xtest_getCsvSpec_key(self):
        job = basic.program.Job("unit")
        tdata = {}
        args = {"application": "TEST", "environment": "ENV01", "modus": "unit", "loglevel": "debug",
                "tdtyp": "csv", "tdsrc": "TC0001", "tdname": "testspec",
                "modus": "unit"}
        job.par.setParameterArgs(args)
        """
        c) keys : as a tree - the rows must be uniquely identified by the first column
        a_0 is keyword in CSV_HEADER_START
        a_1 ... a_n is a key characterized by a header-field like _fk* or _pk*
        a_0 : { .. a_n : { _keys : [ _fpk*.. ], _header : [ fields.. ], _data : { pk_0 : { ... pk_n : { field : value } } } } }
        """
    def xtest_getCsvSpec_conf(self):
        job = basic.program.Job("unit")
        tdata = {}
        args = {"application": "TEST", "environment": "ENV01", "modus": "unit", "loglevel": "debug",
                "tdtyp": "csv", "tdsrc": "TC0001", "tdname": "testspec",
                "modus": "unit"}
        job.par.setParameterArgs(args)
        """
        d) conf :
        _header : [ field_0, ... ]
        { field_0 : { attr_0 : val_0, .. }, field_1 : { ... }, ... }
        """
        specLines = [
            "table:lofts;_field;field;type;acceptance;key",
            "lofts;street;a;str;;T:1",
            ";city;b;str;;F:1",
            "#;;;;;;"
        ]
        tdata = t.parseCsvSpec(job.m, specLines, D.CSV_SPECTYPE_CONF)
        print(tdata)
        self.assertEqual(1, len(tdata))
        self.assertNotIn(B.DATA_NODE_TABLES, tdata)
        self.assertIn("lofts", tdata)
        table = tdata["lofts"]
        self.assertIn(B.DATA_NODE_HEADER, table)
        self.assertIn(B.DATA_NODE_DATA, table)

if __name__ == '__main__':
    unittest.main()

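Editor's usage note: the test class is unittest-based and can be run with "python -m unittest test.test_tdata" (assuming the repository root is on PYTHONPATH and a "unit" Job configuration exists, as the Job("unit") constructor calls above require).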
utils/data_const.py (7 changed lines)

@@ -29,12 +29,19 @@ DATA_SRC_CSV = "csv"
CSV_HEADER_START = ["node", "table", "tabelle"]
CSV_DELIMITER = ";"
"""
internal structure of testdata
"""
CSV_SPECTYPE_DATA = "data"
CSV_SPECTYPE_TREE = "tree"
CSV_SPECTYPE_KEYS = "keys"
CSV_SPECTYPE_CONF = "conf"
CSV_NODETYPE_KEYS = "_keys"
CSV_BLOCK_OPTION = "option"
CSV_BLOCK_STEP = "step"
EXCP_MALFORMAT = "malformed line: "
ATTR_SRC_TYPE = "tdtyp"
ATTR_SRC_DATA = "tdsrc"
ATTR_SRC_NAME = "tdname"

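Editor's illustration (an assumption mirroring parseCsvSpec in utils/tdata_tool.py below; the sample line comes from the tests above) of how the first CSV cell selects a block type via these constants:

line = "option:description;something;;;;;"
fields = line.split(CSV_DELIMITER)    # CSV_DELIMITER == ";"
a = fields[0].lower().split(":")      # ["option", "description"]
# a[0] is then matched against CSV_BLOCK_OPTION ("option"), CSV_BLOCK_STEP
# ("step") and the CSV_HEADER_START keywords ("node", "table", "tabelle");
# lines whose first cell starts with "#" are treated as comments.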
utils/file_tool.py (2 changed lines)

@@ -180,7 +180,7 @@ def readFileDict(path, msg):
        doc = json.load(file)
        file.close()
    elif D.DFILE_TYPE_CSV in path[-5:]:
        doc = utils.tdata_tool.getCsvSpec(msg, path, "conf")
        doc = utils.tdata_tool.getCsvSpec(msg, path, D.CSV_SPECTYPE_CONF)
    return doc

def writeFileText(msg, path, text, enc="utf-8"):

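Editor's usage sketch (the path and the Job message object are assumptions): with the change above, a filename ending in ".csv" is routed to the CSV spec parser via the shared constant instead of the literal "conf":

import utils.file_tool
# the ".csv" suffix matches D.DFILE_TYPE_CSV, so the file is parsed as a
# CSV_SPECTYPE_CONF spec rather than loaded as JSON:
doc = utils.file_tool.readFileDict("config/lofts.csv", job.m)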
utils/tdata_tool.py (37 changed lines)

@@ -29,6 +29,7 @@ import utils.data_const as D
TOOL_NAME = "tdata_tool"
""" name of the tool in order to switch debug-info on """
TDATA_NODES = [ D.CSV_BLOCK_OPTION ]
def getTdataAttr():
    job = basic.program.Job.getInstance()
@@ -74,7 +75,7 @@ getTestdata():
    data = getCsvSpec(job.m, filename, D.CSV_SPECTYPE_DATA)
    for k in data:
        tdata[k] = data[k]
        if (k == "option"):
        if (k == D.CSV_BLOCK_OPTION):
            for p in data[k]:
                setattr(job.par, p, data[k][p])
    else:
@@ -100,11 +101,16 @@ getCsvSpec(msg, filename, type):
        _header : [ field_0, ... ]
        { field_0 : { attr_0 : val_0, .. }, field_1 : { ... }, ... }
    """
    lines = utils.file_tool.readFileLines(filename, msg)
    return parseCsvSpec(msg, lines, type)

def parseCsvSpec(msg, lines, type):
    data = {}
    header = []
    h = [] # from a[]
    lines = utils.file_tool.readFileLines(filename, msg)
    h = [] # from a[]
    status = "start"
    tableDict = {}
    for l in lines:
        print("lines "+l)
@@ -117,9 +123,9 @@
            continue
        a = fields[0].lower().split(":")
        # keywords option, step, table
        if (a[0] not in data):
        if a[0] not in data and (a[0] in TDATA_NODES):
            data[a[0]] = {}
        if (a[0].lower() == "step"):
        if (a[0].lower() == D.CSV_BLOCK_STEP):
            if (not B.DATA_NODE_STEPS in data):
                data[B.DATA_NODE_STEPS] = []
            step = {}
@@ -129,16 +135,22 @@
            a = fields[3].split(",")
            for arg in a:
                b = arg.split(":")
                step[B.D.ATTR_STEP_ARGS][b[0]] = b[1]
                if len(b) < 2:
                    raise Exception(D.EXCP_MALFORMAT + "" + l)
                step[B.ATTR_STEP_ARGS][b[0]] = b[1]
            data[B.DATA_NODE_STEPS].append(step)
            continue
        elif (a[0].lower() == "option"):
        elif (a[0].lower() == D.CSV_BLOCK_OPTION):
            if len(a) < 2:
                raise Exception(D.EXCP_MALFORMAT+""+l)
            data[a[0]][a[1]] = fields[1]
            continue
        elif (a[0].lower() in D.CSV_HEADER_START):
            # create deep structure a_0 ... a_n
            print("tdata 136 CSV_HEADER_START "+str(len(a)))
            h = a
            data[B.DATA_NODE_TABLES] = {}
            h[0] = B.DATA_NODE_TABLES
            tableDict = getTabContent(msg, data, h)
            i = 0
            for f in fields:
@@ -149,6 +161,7 @@
                    break
                header.append(f)
            tableDict[B.DATA_NODE_HEADER] = header
            print("tdata 165 header "+str(header))
            if type == D.CSV_SPECTYPE_TREE:
                tableDict[B.DATA_NODE_DATA] = {}
            elif type == D.CSV_SPECTYPE_KEYS:
@@ -183,12 +196,15 @@
            setTabContent(msg, data, tableDict, h)
    if (status in [D.CSV_SPECTYPE_DATA, D.CSV_SPECTYPE_KEYS]):
        tableDict = getTabContent(msg, data, h)
        if type == D.CSV_SPECTYPE_DATA:
        if type == D.CSV_SPECTYPE_CONF:
            tableDict[B.DATA_NODE_HEADER] = headerFields
        setTabContent(msg, data, tableDict, h)
    print("return getCsvSpec "+str(data))
    if type == D.CSV_SPECTYPE_CONF:
        data = data[B.DATA_NODE_TABLES]
    print("return getCsvSpec "+str(data))
    return data

def setTabContent(msg, data, tabledata, path):
    if len(path) >= 2 and path[1] not in data[path[0]]:
        data[path[0]][path[1]] = {}
@@ -203,6 +219,7 @@ setTabContent(msg, data, tabledata, path):
    elif len(path) == 4:
        data[path[0]][path[1]][path[2]][path[3]] = tabledata

def getTabContent(msg, data, path):
    if len(path) >= 2 and path[1] not in data[path[0]]:
        data[path[0]][path[1]] = {}
@@ -216,6 +233,8 @@ getTabContent(msg, data, path):
        return data[path[0]][path[1]][path[2]]
    elif len(path) == 4:
        return data[path[0]][path[1]][path[2]][path[3]]
    else:
        pass

def readCsv(msg, filename, comp, aliasNode=""):
    job = basic.program.Job.getInstance()

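Editor's note: setTabContent and getTabContent hard-code path depths 2 to 4, and the new else branch silently returns None for anything deeper. A generic walk in this spirit would be (a sketch, not the committed code):

def getNode(data, path):
    # walk data[path[0]]...[path[-1]], creating intermediate dicts as needed
    node = data
    for p in path:
        node = node.setdefault(p, {})
    return node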