
matchtypes

master
Ulrich Carmesin 3 years ago
parent commit f426f224b1
  1. components/testexec.py (42)
  2. test/test_compare.py (63)
  3. utils/match_tool.py (107)
  4. utils/tdata_tool.py (96)

components/testexec.py (42)

@@ -31,11 +31,15 @@ from datetime import datetime
 import basic.message
 import basic.program
 import inspect
+import os
 import utils.db_abstract
 import basic.toolHandling
 import components.component
 import basic.componentHandling
 import utils.db_abstract
+import utils.path_tool
+import utils.match_tool
+import utils.tdata_tool
 import basic.constants as B
@@ -263,5 +267,43 @@ class Testexecuter():
         3 rating the difference if this can be accepted
         :return:
         """
+        job = basic.program.Job.getInstance()
+        verify = job.getDebugLevel(self.name)
+        cm = basic.componentHandling.ComponentManager.getInstance()
+        data = {}
+        matching = utils.match_tool.Matching()
+        if "db" in self.conf["artifact"]:
+            for t in self.conf["artifact"]["db"]:
+                if t in ["type"]:
+                    continue
+                # fill each data into matching-object
+                for side in utils.match_tool.MATCH_SIDES:
+                    if side == utils.match_tool.MATCH_SIDE_PRESTEP:
+                        if "prestep" in self.conf["artifact"]["db"][t]:
+                            a = self.conf["artifact"]["db"][t]["prestep"].split(":")
+                            if a[0] != self.name:
+                                comp = cm.getComponent(a[0])
+                            else:
+                                comp = self
+                            path = os.path.join(utils.path_tool.composePatttern(
+                                "{"+utils.match_tool.MATCH[utils.match_tool.MATCH_SIDE_POSTACTUAL]["filepattern"]+"}", comp), a[1]+".csv")
+                        pass
+                    elif side == utils.match_tool.MATCH_SIDE_TESTCASE:
+                        if hasattr(job.par, "testcase_example"):
+                            path = os.path.join(utils.path_tool.composePatttern(
+                                "{"+utils.match_tool.MATCH[utils.match_tool.MATCH_SIDE_POSTEXPECT]["filepattern"]+"}", self), t+".csv")
+                            path.replace(getattr(job.par, "testcase"), getattr(job.par, "testcase_example"))
+                    else:
+                        path = os.path.join(utils.path_tool.composePatttern("{"+utils.match_tool.MATCH[side]["filepattern"]+"}", self), t+".csv")
+                    filedata = utils.tdata_tool.readCsv(self.m, path, self)
+                    data[side] = utils.match_tool.MATCH[side]
+                    data[side]["path"] = path
+                    data[side]["data"] = filedata
+                # execute the matches
+                for type in utils.match_tool.MATCH_TYPES:
+                    matching.setData(data, type)
+                    text = utils.match_tool.matchTree(matching)
+                    # write text
+                pass
         pass
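Note: for orientation, the new comparison block above assembles one entry per match side and then runs every match type over it. A minimal, hypothetical sketch of that data shape (paths and table contents invented; only the names MATCH_SIDES, MATCH, MATCH_TYPES, Matching, setData and matchTree are taken from the diff):

    import utils.match_tool as M

    data = {}
    for side in M.MATCH_SIDES:                        # preexpect, postexpect, preactual, postactual, prestep, testexample
        data[side] = dict(M.MATCH[side])              # side metadata: short/long label and filepattern
        data[side]["path"] = "/tmp/" + side + ".csv"  # the committed code composes this via utils.path_tool
        data[side]["data"] = {"database": {"scheme": {"table": {"_data": []}}}}  # parsed CSV tree from readCsv

    matching = M.Matching()                           # the unit tests pass a component: Matching(comp)
    for match_type in M.MATCH_TYPES:                  # preconditions, prestep, testexample, success, postconditions
        matching.setData(data, match_type)
        text = M.matchTree(matching)                  # comparison text for this match type

The sketch copies MATCH[side] into data[side]; the committed code assigns the module-level dict directly before writing "path" and "data" into it.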

test/test_compare.py (63)

@@ -2,9 +2,13 @@ import json
 import unittest
 from basic.program import Job
 import utils.match_tool
+import utils.match_tool as M
 import components.component
 tdata = {
-    "postReq": {
+    M.MATCH_SIDE_POSTEXPECT: {
+        "path": "",
+        "data": {
         "database": {
             "scheme": {
                 "table": {
@@ -12,8 +16,11 @@ tdata = {
                 }
             }
         }
+        }
     },
-    "preAct": {
+    M.MATCH_SIDE_PREACTUAL: {
+        "path": "",
+        "data": {
         "database": {
             "scheme": {
                 "table": {
@@ -26,8 +33,11 @@ tdata = {
                 }
             }
         }
+        }
     },
-    "postAct": {
+    M.MATCH_SIDE_POSTACTUAL: {
+        "path": "",
+        "data": {
         "database": {
             "scheme": {
                 "table": {
@@ -41,6 +51,7 @@ tdata = {
             }
         }
     }
+    }
 }
 conf = {
     "ddl": {
@@ -62,12 +73,12 @@ conf = {
 class MyTestCase(unittest.TestCase):
     def runTest(self):
         self.test_matchstart()
-        self.test_hitmanage()
-        self.test_similarity()
-        self.test_bestfit()
-        self.test_compareRow()
-        self.test_compareRows()
-        self.test_match()
+        #self.test_hitmanage()
+        #self.test_similarity()
+        #self.test_bestfit()
+        #self.test_compareRow()
+        #self.test_compareRows()
+        #self.test_match()

     def test_matchstart(self):
         job = Job("unit")
@@ -75,10 +86,10 @@ class MyTestCase(unittest.TestCase):
         comp.files = { "A": "/home/match/per.csv", "B": "/home/match/post.csv"}
         comp.conf = conf
         matching = utils.match_tool.Matching(comp)
-        matching.setData(tdata, utils.match_tool.MATCH_PREPOST)
+        matching.setData(tdata, utils.match_tool.MATCH_SUCCESS)
         print(matching.htmltext)

-    def test_hitmanage(self):
+    def xtest_hitmanage(self):
         comp = components.component.Component()
         comp.files = { "A": "/home/match/per.csv", "B": "/home/match/post.csv"}
         comp.conf = conf
@@ -93,20 +104,20 @@ class MyTestCase(unittest.TestCase):
         self.assertEqual(matching.isHitB("b0005"), True, "doesnt exist")
         self.assertEqual(("b0005" in matching.linksB), True, "doesnt exist")

-    def test_similarity(self):
+    def xtest_similarity(self):
         matching = self.getMatching()
         utils.match_tool.getSimilarity(matching, ":database:scheme:table:_data",
-            tdata["preAct"]["database"]["scheme"]["table"]["_data"][0],
-            tdata["postAct"]["database"]["scheme"]["table"]["_data"][0],1)
+            tdata[M.MATCH_SIDE_PREACTUAL]["data"]["database"]["scheme"]["table"]["_data"][0],
+            tdata[M.MATCH_SIDE_POSTACTUAL]["data"]["database"]["scheme"]["table"]["_data"][0],1)

-    def test_bestfit(self):
+    def xtest_bestfit(self):
         job = Job("unit")
         comp = components.component.Component()
         comp.files = { "A": "/home/match/per.csv", "B": "/home/match/post.csv"}
         comp.conf = conf
         matching = utils.match_tool.Matching(comp)
-        matching.sideA = tdata["preAct"]["database"]["scheme"]["table"]["_data"]
-        matching.sideB = tdata["postAct"]["database"]["scheme"]["table"]["_data"]
+        matching.sideA = tdata[M.MATCH_SIDE_PREACTUAL]["data"]["database"]["scheme"]["table"]["_data"]
+        matching.sideB = tdata[M.MATCH_SIDE_POSTACTUAL]["data"]["database"]["scheme"]["table"]["_data"]
         utils.match_tool.matchBestfit(matching, ":database:scheme:table:_data")
         print(json.dumps(matching.linksA))
         print(json.dumps(matching.linksB))
@@ -116,30 +127,32 @@ class MyTestCase(unittest.TestCase):
         print(json.dumps(matching.linksB))
         print(json.dumps(matching.nomatch))

-    def test_compareRow(self):
+    def xtest_compareRow(self):
         job = Job("unit")
         comp = components.component.Component()
         comp.files = { "A": "/home/match/per.csv", "B": "/home/match/post.csv"}
         comp.conf = conf
         matching = self.getMatching()
-        matching.sideA = tdata["preAct"]["database"]["scheme"]["table"]["_data"]
-        matching.sideB = tdata["postAct"]["database"]["scheme"]["table"]["_data"]
+        matching.sideA = tdata[M.MATCH_SIDE_PREACTUAL]["data"]["database"]["scheme"]["table"]["_data"]
+        matching.sideB = tdata[M.MATCH_SIDE_POSTACTUAL]["data"]["database"]["scheme"]["table"]["_data"]
         ddl = conf["ddl"]["database"]["scheme"]["table"]
         header = []
         for f in ddl["_header"]:
             header.append({"field": f, "type": ddl[f]["type"], "acceptance": ddl[f]["acceptance"]})
         i = 1
-        text = utils.match_tool.compareRow(matching, header, tdata["preAct"]["database"]["scheme"]["table"]["_data"][i], tdata["postAct"]["database"]["scheme"]["table"]["_data"][i])
+        text = utils.match_tool.compareRow(matching, header, tdata[M.MATCH_SIDE_PREACTUAL]["data"]["database"]["scheme"]["table"]["_data"][i],
+                                           tdata[M.MATCH_SIDE_POSTACTUAL]["data"]["database"]["scheme"]["table"]["_data"][i])
         print(text)

-    def test_compareRows(self):
+    def xtest_compareRows(self):
         job = Job("unit")
         comp = components.component.Component()
         comp.files = { "A": "/home/match/per.csv", "B": "/home/match/post.csv"}
         comp.conf = conf
         matching = self.getMatching()
-        matching.sideA = tdata["preAct"]["database"]["scheme"]["table"]["_data"]
-        matching.sideB = tdata["postAct"]["database"]["scheme"]["table"]["_data"]
+        matching.sideA = tdata[M.MATCH_SIDE_PREACTUAL]["data"]["database"]["scheme"]["table"]["_data"]
+        matching.sideB = tdata[M.MATCH_SIDE_POSTACTUAL]["data"]["database"]["scheme"]["table"]["_data"]
         linksA = {"a0001": "b0001", "a0002": "b0002" }
         matching.linksA = linksA
         text = utils.match_tool.compareRows(matching, ":database:scheme:table:_data")
@@ -165,7 +178,7 @@ class MyTestCase(unittest.TestCase):
         comp.files = { "A": "/home/match/per.csv", "B": "/home/match/post.csv"}
         comp.conf = conf
         matching = utils.match_tool.Matching(comp)
-        matching.setData(tdata, utils.match_tool.MATCH_PREPOST)
+        matching.setData(tdata, M.MATCH_SUCCESS)
         matching.difftext = ""
         return matching
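Note: the fixture rework above keys each side of tdata by the MATCH_SIDE_* constants and adds a "path"/"data" level, matching what Testexecuter now builds. Illustrative shape of one side entry (column names and rows invented):

    tdata_side = {
        "path": "",                                    # later the location of the CSV file
        "data": {
            "database": {
                "scheme": {
                    "table": {
                        "_data": [{"id": "1", "name": "a"}]   # hypothetical rows, accessed as ...["_data"][i] in the tests
                    }
                }
            }
        }
    }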

utils/match_tool.py (107)

@@ -12,10 +12,71 @@ import basic.constants as B
 """
 """
+MATCH_SIDE_PREEXPECT = "preexpect"
+""" it implies the precondition of the expectation """
+MATCH_DICT_PREEXPECT = {
+    "short": "SV",
+    "long": "Soll-Vorher",
+    "filepattern": "rsprecond"
+}
+MATCH_SIDE_POSTEXPECT = "postexpect"
+""" it implies the postcondition of the expectation - it is the expectation"""
+MATCH_DICT_POSTEXPECT = {
+    "short": "SN",
+    "long": "Soll-Nachher",
+    "filepattern": "rsprecond"
+}
+MATCH_SIDE_PREACTUAL = "preactual"
+""" it implies the precondition of the actual execution """
+MATCH_DICT_PREACTUAL = {
+    "short": "IV",
+    "long": "Ist-Vorher",
+    "filepattern": "rsprecond"
+}
+MATCH_SIDE_POSTACTUAL = "postactual"
+""" it implies the postondition of the actual execution - it is the result """
+MATCH_DICT_POSTACTUAL = {
+    "short": "IN",
+    "long": "Ist-Nachher",
+    "filepattern": "rsprecond"
+}
+MATCH_SIDE_PRESTEP = "prestep"
+""" it implies the postcondition of a preceding step of the actual execution - the preceding step must be configured in the component"""
+MATCH_DICT_PRESTEP = {
+    "short": "VS",
+    "long": "Vorhergehender Schritt (Nachher)",
+    "filepattern": "rsprecond"
+}
+MATCH_SIDE_TESTCASE = "testexample"
+""" it implies the postcondition of an exemplary testcase - the exemplary testcase must be parametrized """
+MATCH_DICT_TESTCASE = {
+    "short": "VT",
+    "long": "Vergleichstestfall (Nachher)",
+    "filepattern": "rsprecond"
+}
+MATCH_SIDES = [MATCH_SIDE_PREEXPECT, MATCH_SIDE_POSTEXPECT, MATCH_SIDE_PREACTUAL, MATCH_SIDE_POSTACTUAL, MATCH_SIDE_PRESTEP, MATCH_SIDE_TESTCASE]
+
+MATCH_SUCCESS = "success"
+""" matches the action between pre- and postcondition of the actual testexecution """
+MATCH_PRECOND = "preconditions"
+""" matches the preconditions betwenn the required result the the actual testexecution
+    - just for info if the both executions have the same precondition """
+MATCH_POSTCOND = "postconditions"
+""" matches the postconditions betwenn the required result the the actual testexecution
+    - it is the main comparison """
+MATCH_PRESTEP = "prestep"
+MATCH_TESTEXAMPLE = "testeample"
+MATCH_TYPES = [MATCH_PRECOND, MATCH_PRESTEP, MATCH_TESTEXAMPLE, MATCH_SUCCESS, MATCH_POSTCOND]
+
 MATCH = {
-    "preconditions": {
-        "A": "preReq",
-        "B": "preAct",
+    MATCH_SIDE_PREEXPECT : MATCH_DICT_PREEXPECT,
+    MATCH_SIDE_POSTEXPECT : MATCH_DICT_POSTEXPECT,
+    MATCH_SIDE_PREACTUAL : MATCH_DICT_PREACTUAL,
+    MATCH_SIDE_POSTACTUAL : MATCH_DICT_POSTACTUAL,
+    MATCH_SIDE_PRESTEP : MATCH_DICT_PRESTEP,
+    MATCH_SIDE_TESTCASE : MATCH_DICT_TESTCASE,
+    MATCH_PRECOND: {
+        "A": MATCH_SIDE_PREEXPECT,
+        "B": MATCH_SIDE_PREACTUAL,
         "shortA": "SV",
         "shortB": "IV",
         "longA": "Soll-Vorher",
@@ -23,9 +84,9 @@ MATCH = {
         "mode": "info",
         "title": "Pruefung Vorbedingung (Soll-Vorher - Ist-Vorher)"
     },
-    "postconditions": {
-        "A": "postReq",
-        "B": "postAct",
+    MATCH_POSTCOND: {
+        "A": MATCH_SIDE_POSTEXPECT,
+        "B": MATCH_SIDE_POSTACTUAL,
         "shortA": "SN",
         "shortB": "IN",
         "longA": "Soll-Nachher",
@@ -33,9 +94,9 @@ MATCH = {
         "mode": "hard",
         "title": "Fachliche Auswertung (Soll-Nachher - Ist-Nachher)"
     },
-    "prepost": {
-        "A": "preAct",
-        "B": "postAct",
+    MATCH_SUCCESS: {
+        "A": MATCH_SIDE_PREACTUAL,
+        "B": MATCH_SIDE_POSTACTUAL,
         "shortA": "IV",
         "shortB": "IN",
         "longA": "Ist-Vorher",
@@ -43,25 +104,27 @@ MATCH = {
         "mode": "action",
         "title": "Ablauf-Differenz (Ist-Vorher - Ist-Nachher)"
     },
-    "prestep": {
-        "A": "preStep",
-        "B": "postAct",
+    MATCH_PRESTEP: {
+        "A": MATCH_SIDE_PRESTEP,
+        "B": MATCH_SIDE_POSTACTUAL,
         "shortA": "VS",
         "shortB": "IN",
         "longA": "Vor-Schritt",
         "longB": "Ist-Nachher",
         "mode": "action",
         "title": "Schritt-Differenz (Vorschritt-Nachher - Ist-Nachher)"
-    }
+    },
+    MATCH_TESTEXAMPLE: {
+        "A": MATCH_SIDE_TESTCASE,
+        "B": MATCH_SIDE_POSTACTUAL,
+        "shortA": "VS",
+        "shortB": "IN",
+        "longA": "Vor-Schritt",
+        "longB": "Ist-Nachher",
+        "mode": "action",
+        "title": "Schritt-Differenz (Vorschritt-Nachher - Ist-Nachher)"
+    },
 }
-MATCH_PREPOST = "prepost"
-""" matches the action between pre- and postcondition of the actual testexecution """
-MATCH_PRECOND = "preconditions"
-""" matches the preconditions betwenn the required result the the actual testexecution
-    - just for info if the both executions have the same precondition """
-MATCH_POSTCOND = "postconditions"
-""" matches the postconditions betwenn the required result the the actual testexecution
-    - it is the main comparison """

 class Matching():
@@ -333,7 +396,7 @@ def matchDict(matching, A, B, path):
             if (isinstance(B[k], dict)): B[k]["_match"] = "Y"
             matchElement(matching, A[k], B[k], path+":"+k)
         else:
-            if (isinstance(A[k], dict)): A[k]["_march"] = "N"
+            if (isinstance(A[k], dict)): A[k]["_match"] = "N"
             matchElement(matching, A[k], None, path+":"+k)
     if (B is not None):
         for k in B:
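Note: with these constants the MATCH dictionary now serves two lookups at once: per-side metadata (keyed by the MATCH_SIDE_* values) and the A/B pairing of each match type (keyed by MATCH_PRECOND, MATCH_POSTCOND, ...). A small usage sketch, not part of the commit:

    import utils.match_tool as M

    match_type = M.MATCH_POSTCOND                 # the "main comparison"
    side_a = M.MATCH[match_type]["A"]             # -> MATCH_SIDE_POSTEXPECT ("postexpect")
    side_b = M.MATCH[match_type]["B"]             # -> MATCH_SIDE_POSTACTUAL ("postactual")
    labels = (M.MATCH[side_a]["long"], M.MATCH[side_b]["long"])   # ("Soll-Nachher", "Ist-Nachher")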

utils/tdata_tool.py (96)

@@ -34,6 +34,7 @@ CSV_DELIMITER = ";"
 CSV_SPECTYPE_DATA = "data"
 CSV_SPECTYPE_TREE = "tree"
+CSV_SPECTYPE_KEYS = "keys"

 ATTR_SRC_TYPE = "tdtyp"
 ATTR_SRC_DATA = "tdsrc"
@@ -43,10 +44,15 @@ def getTdataAttr():
     job = basic.program.Job.getInstance()
     out = {} #
     out[ATTR_SRC_TYPE] = DATA_SRC_DIR
+    print("---getTdataAttr")
+    print(vars(job.par))
+    if hasattr(job.par, B.PAR_TESTCASE):
         out[ATTR_SRC_NAME] = getattr(job.par, B.PAR_TESTCASE)
+    elif hasattr(job.par, B.PAR_TESTSUITE):
+        out[ATTR_SRC_NAME] = getattr(job.par, B.PAR_TESTSUITE)
     for p in [ATTR_SRC_TYPE, ATTR_SRC_DATA, ATTR_SRC_NAME]:
-        out[p] = ""
-        if getattr(job.par, p):
+        #out[p] = ""
+        if hasattr(job.par, p):
             out[p] = getattr(job.par, p)
     return out
@@ -60,10 +66,11 @@ def getTestdata():
     :return:
     """
     job = basic.program.Job.getInstance()
-    reftyp = getattr(job.par, "tdtyp")
-    source = getattr(job.par, "tdsrc")
-    criteria = getattr(job.par, "tdname")
+    #reftyp = getattr(job.par, "tdtyp")
+    #source = getattr(job.par, "tdsrc")
+    #criteria = getattr(job.par, "tdname")
     tdata = getTdataAttr() # {"reftyp": reftyp, "source": source, "criteria": criteria}
+    print(tdata)
     if tdata[ATTR_SRC_TYPE] == "flaskdb":
         # read data-structure with sourcename
         # connect to source
@@ -84,13 +91,25 @@ def getTestdata():
 def getCsvSpec(msg, filename, type):
     """
     get data from a csv-file
+    a = field[0] delimited by :
     a) data : like a table with data-array of key-value-pairs
+        a_0 is keyword [option, step, CSV_HEADER_START ]
+        a_0 : { a_1 : { f_1 : v_1, .... } # option, step
+        a_0 : { .. a_n : { _header : [ .. ], _data : [ rows... ] # table, node
     b) tree : as a tree - the rows must be unique identified by the first column
+        a_0 is keyword in CSV_HEADER_START
+        a_0 : { .. a_n : { _header : [ fields.. ], _data : { field : value }
+    c) keys : as a tree - the rows must be unique identified by the first column
+        a_0 is keyword in CSV_HEADER_START
+        a_1 ... a_n is key characterized by header-field like _fk* or _pk*
+        a_0 : { .. a_n : { _keys : [ _fpk*.. ] , _header : [ fields.. ], _data : { pk_0 : { ... pk_n : { field : value }
     """
     data = {}
     header = []
+    h = [] # from a[]
     lines = utils.file_tool.readFileLines(msg, filename)
     status = "start"
+    tableDict = {}
     for l in lines:
         print("lines "+l)
         fields = l.split(CSV_DELIMITER)
@@ -121,49 +140,64 @@ def getCsvSpec(msg, filename, type):
                 data[a[0]][a[1]] = fields[1]
             continue
         elif (a[0].lower() in CSV_HEADER_START):
+            # create deep structure a_0 ... a_n
+            print("tdata 136 CSV_HEADER_START "+str(len(a)))
+            if (status == CSV_SPECTYPE_DATA):
+                setSpecDict(msg, h, data, tableDict)
+                #CSV_SPECTYPE_DATA
+                #CSV_SPECTYPE_TREE
+                #CSV_SPECTYPE_KEYS
+            tableDict = {}
+            tableDict[B.DATA_NODE_HEADER] = []
+            if type == CSV_SPECTYPE_DATA:
+                tableDict[B.DATA_NODE_DATA] = []
+            else:
+                tableDict[B.DATA_NODE_DATA] = []
+                #CSV_SPECTYPE_DATA
+                #CSV_SPECTYPE_TREE
+                #CSV_SPECTYPE_KEYS
+            # save header in separate structure
             i = 0
             for f in fields:
                 i += 1
-                if i == 1: continue
+                if i <= 1: continue
                 header.append(f)
             status = CSV_SPECTYPE_DATA
+            h = a
             continue
         elif (status == CSV_SPECTYPE_DATA):
             # check A-col for substructure
-            if (a[0] not in data):
-                data[a[0]] = {}
-            if len(a) == 1 and type == CSV_SPECTYPE_DATA:
-                data[a[0]][B.DATA_NODE_DATA] = []
-            # its a component
-            if len(a) > 1 and a[1] not in data[a[0]]:
-                data[a[0]][a[1]] = {}
-            if len(a) == 2 and type == CSV_SPECTYPE_DATA:
-                data[a[0]][a[1]]["_data"] = []
-            if len(a) > 2 and a[2] not in data[a[0]][a[1]]:
-                data[a[0]][a[1]][a[2]] = {}
-            if len(a) == 3 and type == CSV_SPECTYPE_DATA:
-                data[a[0]][a[1]][a[1]][B.DATA_NODE_DATA] = []
             # fill data
             row = {}
             i = 1
+            # case-differentiation DATA or TREE
             for f in header:
                 row[f] = fields[i]
                 i += 1
-            if len(a) == 1 and type == CSV_SPECTYPE_DATA:
-                data[a[0]][B.DATA_NODE_DATA].append(row)
-            elif len(a) == 1 and type == CSV_SPECTYPE_DATA:
-                data[a[0]] = {f: row}
-            elif len(a) == 2 and type == CSV_SPECTYPE_DATA:
-                data[a[0]][a[1]][B.DATA_NODE_DATA].append(row)
-            elif len(a) == 1 and type == CSV_SPECTYPE_DATA:
-                data[a[0]][a[1]][B.DATA_NODE_DATA] = {f: row}
-            elif len(a) == 3 and type == CSV_SPECTYPE_DATA:
-                data[a[0]][a[1]][a[2]] = row
-            elif len(a) == 1 and type == CSV_SPECTYPE_DATA:
-                data[a[0]][a[1]][a[2]] = {f: row}
+                if type == CSV_SPECTYPE_TREE:
+                    tableDict[B.DATA_NODE_DATA][f] = fields[i]
+            if type == CSV_SPECTYPE_DATA:
+                tableDict[B.DATA_NODE_DATA].append(row)
+    if (status == CSV_SPECTYPE_DATA):
+        setSpecDict(msg, h, data, tableDict)
     print("return getCsvSpec "+str(data))
     return data
+
+def setSpecDict(msg, path, data, tabledata):
+    if len(path) >= 2 and path[1] not in data[path[0]]:
+        data[path[0]][path[1]] = {}
+    if len(path) >= 3 and path[2] not in data[path[0]][path[1]]:
+        data[path[0]][path[1]][path[2]] = {}
+    if len(path) >= 4 and path[3] not in data[path[0]][path[1]][path[2]]:
+        data[path[0]][path[1]][path[2]][path[3]] = {}
+    if len(path) == 2:
+        data[path[0]][path[1]] = tabledata
+    elif len(path) == 3:
+        data[path[0]][path[1]][path[2]] = tabledata
+    elif len(path) == 4:
+        data[path[0]][path[1]][path[2]][path[3]] = tabledata
+    pass

 def readCsv(msg, filename, comp):
     job = basic.program.Job.getInstance()
     verify = -1+job.getDebugLevel("tdata_tool")
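Note: the new setSpecDict helper hangs the table parsed from one header/data block into the spec tree at the path taken from the colon-split first column of the header row. Standalone illustration of the four-element case (names and values invented; msg is only passed through in the original and is omitted here):

    path = ["testcase", "database", "scheme", "table"]          # a_0 ... a_3 from the header row
    tabledata = {"_header": ["id", "name"],                     # hypothetical columns
                 "_data": [{"id": "1", "name": "alice"}]}
    data = {path[0]: {}}                                        # data[path[0]] must already exist
    data[path[0]].setdefault(path[1], {}).setdefault(path[2], {})[path[3]] = tabledata
    # equivalent to setSpecDict(msg, path, data, tabledata) for len(path) == 4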
