
Refactoring: Constants

Branch: master
Ulrich Carmesin committed 2 years ago
Parent commit: e1d2ba176f
Changed files (lines changed):
  1. basic/componentHandling.py (2)
  2. basic/constants.py (22)
  3. basic/program.py (4)
  4. test/test_compare.py (4)
  5. test/test_job.py (39)
  6. utils/api_abstract.py (6)
  7. utils/api_const.py (4)
  8. utils/cli_abstract.py (2)
  9. utils/cli_const.py (9)
  10. utils/config_tool.py (5)
  11. utils/conn_tool.py (8)
  12. utils/data_const.py (28)
  13. utils/db_abstract.py (43)
  14. utils/dbcsv_tool.py (2)
  15. utils/dbmysql_tool.py (4)
  16. utils/dbsfile_tool.py (2)
  17. utils/dbshive_tool.py (2)
  18. utils/file_tool.py (10)
  19. utils/match_const.py (122)
  20. utils/match_tool.py (133)
  21. utils/report_tool.py (2)
  22. utils/tdata_tool.py (86)

basic/componentHandling.py (2 lines changed)

@@ -116,7 +116,7 @@ class ComponentManager:
elif (init != "N"):
return ComponentManager()
else:
raise Exception("Klasse noch nicht initialisiert")
raise Exception(B.EXCEPT_NOT_INITIALIZED)
def createComponent(self, componentName, nr, suffix):
"""

basic/constants.py (22 lines changed)

@@ -17,8 +17,13 @@ The constants describe the keywords of the main datastructures; these are
it represents the application-knowledge and the knowledge of the application-installation
* test-specification with testdata - stored in external testdata-folder
* the internal datastructure
"""
EXCEPT_NOT_IMPLEMENT = "method is not implemented"
EXCEPT_NOT_INITIALIZED = "class is not initialized"
PAR_APP = 'application'
""" definition of the application which will be tested """
PAR_ENV = 'environment'
@@ -68,8 +73,23 @@ DATA_NODE_DDL = "ddl"
DATA_NODE_COMP = "comp"
""" This constant defines """
DATA_NODE_PAR = "par"
DATA_NODE_TYPE = "type"
TYPE_STRING = "string"
TYPE_INT = "int"
TYPE_FLOAT = "float"
TYPE_DOUBLE = "double"
TYPE_DATE = "date"
TYPE_TIME = "time"
SVAL_YES = "y"
SVAL_NO = "n"
SVAL_NULL = "null"
""" This constant defines """
TOPIC_NODE_DB = "db"
ATTR_DB_PARTITION = "partitioned"
""" optional attribute if table is partitioned
- this keyword delimited by "+" will be replaced by partition-names which are parametrized """
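A minimal sketch of how these shared constants are consumed elsewhere in this commit; the function bodies are illustrative, only the constant names come from this file:

import basic.constants as B

def getDbNull():
    # return the shared null-literal instead of a hard-coded "null"
    return B.SVAL_NULL

def selectRows(statement):
    # abstract hooks raise the shared message instead of a string literal
    raise Exception(B.EXCEPT_NOT_IMPLEMENT)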

basic/program.py (4 lines changed)

@@ -160,7 +160,7 @@ class Job:
if not hasattr(self.par, jobdef[program]["dirname"]):
setattr(self.par, jobdef[program]["dirname"],
utils.path_tool.composePattern("{"+basedir+"}", None))
self.par.setParameterLoaded()
self.par.setParameterLoaded()
def getInstance():
if (Job.__instance is not None):
@@ -214,7 +214,7 @@ class Job:
if len(str(jobdef[self.program]["pfilesource"])) < 2:
return None
parpath = utils.path_tool.composePath(jobdef[self.program]["pfilesource"], None)
if not os.path.join(parpath):
if not os.path.exists(parpath):
return None
doc = utils.file_tool.readFileDict(parpath, self.m)
for k in doc.keys():

test/test_compare.py (4 lines changed)

@@ -4,7 +4,7 @@ import unittest
import basic
from basic.program import Job
import utils.match_tool
import utils.match_tool as M
import utils.match_const as M
import components.component
tdata = {
@@ -90,7 +90,7 @@ class MyTestCase(unittest.TestCase):
comp.conf = conf
comp.name = "component"
matching = utils.match_tool.Matching(comp)
matching.setData(tdata, utils.match_tool.MATCH_SUCCESS)
matching.setData(tdata, M.MATCH_SUCCESS)
print(matching.htmltext)
def test_hitmanage(self):

test/test_job.py (39 lines changed)

@@ -3,50 +3,63 @@ import os
from basic.program import Job
from basic.componentHandling import ComponentManager
import init_testcase
import test_executer
import test.constants
HOME_PATH = test.constants.HOME_PATH
PYTHON_CMD = "python"
class MyTestCase(unittest.TestCase):
def runTest(self):
self.test_parameter()
self.test_components()
#self.test_parameter()
#self.test_components()
self.test_run()
def test_parameter(self):
def xtest_parameter(self):
job = Job("unit")
args = { "application" : "TEST" , "environment" : "ENV01", "modus" : "unit", "loglevel" : "debug", "tool" : "job_tool"}
args = { "application" : "TEST" , "environment" : "ENV01", "modus" : "unit", "loglevel" : "debug",
"tool" : "job_tool", "function": "reset_TData,load_TData" }
job.par.setParameterArgs(args)
self.assertEqual(job.hascomponente("TestA"), True)
self.assertEqual(job.hasTool("TestA"), False)
self.assertEqual(job.hasTool("job_tool"), True)
self.assertEqual(job.getDebugLevel("file_tool"), 23)
self.assertEqual(job.getDebugLevel("job_tool"), 23)
self.assertEqual(job.hasFunction("reset_TData"), True)
self.assertEqual(job.hasFunction("load_TData"), True)
self.assertEqual(job.hasFunction("read_TData"), False)
args = { "application" : "TEST" , "environment" : "ENV01", "modus" : "unit", "loglevel" : "debug",
"tool" : "job_tool", "tsdir": os.path.join(HOME_PATH, "test", "lauf", "V0.1", "startjob", "2021-08-21_18-ß2-01")}
job.par.setParameterArgs(args)
def test_components(self):
def xtest_components(self):
print("# # # # tetsComponents # # # # #")
job = Job.resetInstance("unit")
args = { "application" : "TEST" , "environment" : "ENV01", "modus" : "unit", "loglevel" : "debug", "tool" : "job_tool"}
job.par.setParameterArgs(args)
cm = ComponentManager()
cm.createComponents("testb", 0, "")
cm.createComponents("testa", 1, "")
cm.createComponent("testb", 0, "")
cm.createComponent("testa", 1, "")
def test_run(self):
# os.system("python "+os.path.join(HOME_PATH, "check_environment.py")+" -a TEST -e ENV01")
# os.system("python "+os.path.join(HOME_PATH, "init_testset.py")+" -a TEST -e ENV01 "
# os.system("python "+os.path.join(HOME_PATH, "init_testsuite.py")+" -a TEST -e ENV01 "
# "-ts "+os.path.join(HOME_PATH, "test","lauf","V0.1","implement_2021-08-28_23-50-51")+" -dt csv -ds implement -dn firstunit")
#os.system(PYTHON_CMD+" "+os.path.join(HOME_PATH,"init_testcase.py")+" -a TEST -e ENV01 "
# "-tc "+os.path.join(HOME_PATH,"test","lauf","V0.1","TC0001","2021-08-28_23-50-51")+" -dt csv -ds implement -dn TC0001")
#args = { "application": "TEST", "environment": "ENV01", "modus": "unit",
# "tool": "job_tool", "tsdir": os.path.join(HOME_PATH,"test","conf","lauf","V0.1","TC0001_2021-08-28_23-50-51")}
#"loglevel": "debug", "tdtyp": "dir",
# "tdsrc": "TC0001", "tdname": "xxx",
job = Job("unit")
args = { "application": "TEST", "environment": "ENV01", "modus": "unit", "loglevel": "debug", "tdtyp": "dir",
"tdsrc": "TC0001", "tdname": "xxx",
"tool": "job_tool", "tcdir": os.path.join(HOME_PATH,"test","lauf","V0.1","TC0001","2021-08-28_23-50-51")}
args = { "application": "TEST", "environment": "ENV01", "modus": "unit", "tstime": "2022-03-19_12-09-09",
"tsdir": '/home/ulrich/6_Projekte/Programme/datest/test/conf/lauf/testlauf/TST001_2022-03-19_12-09-09',
"step": 2 }
# "usecase": "TST001", "tstime": "2022-03-17_17-28"}
job.par.setParameterArgs(args)
job.setProgram("init_testcase")
init_testcase.start(job)
job.setProgram("test_executer")
# init_testcase.start(job)
job.startJob()
test_executer.start(job)
job.stopJob(1)
if __name__ == '__main__':

utils/api_abstract.py (6 lines changed)

@@ -34,14 +34,14 @@ class ApiFcts():
def startCommand(self, comp, args):
""" method to execute the statement
this method should only be called by the class itself """
raise Exception("method is not implemented")
raise Exception(B.EXCEPT_NOT_IMPLEMENT)
def statusCommand(self, comp, args):
""" method to execute the statement
this method should only be called by the class itself """
raise Exception("method is not implemented")
raise Exception(B.EXCEPT_NOT_IMPLEMENT)
def stopCommand(self, comp, args):
""" method to execute the statement
this method should only be called by the class itself """
raise Exception("method is not implemented")
raise Exception(B.EXCEPT_NOT_IMPLEMENT)
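A concrete driver overrides these hooks instead of inheriting the raise; a minimal sketch, assuming a subclass of ApiFcts (the class name and return value are illustrative):

import utils.api_abstract

class MyApiFcts(utils.api_abstract.ApiFcts):
    def startCommand(self, comp, args):
        # a real implementation replaces the B.EXCEPT_NOT_IMPLEMENT raise
        return {"state": "started", "component": comp.name, "args": args}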

utils/api_const.py (4 lines changed)

@@ -0,0 +1,4 @@
#!/usr/bin/python
"""
constants used for api-functions
"""

utils/cli_abstract.py (2 lines changed)

@@ -38,4 +38,4 @@ class CliFcts():
def execCommand(self, comp, command):
""" method to execute the statement
this method should only be called by the class itself """
raise Exception("method is not implemented")
raise Exception(B.EXCEPT_NOT_IMPLEMENT)

utils/cli_const.py (9 lines changed)

@@ -0,0 +1,9 @@
#!/usr/bin/python
"""
constants used for cli-functions
"""
DEFAULT_DB_PARTITION = "n"
""" attribute if table is partitioned - partitions are parametrized """
DEFAULT_DB_CONN_JAR = "n"
""" attribute for connection-jar-file instead of connection by ip, port """

utils/config_tool.py (5 lines changed)

@@ -17,9 +17,10 @@ import utils.path_tool
import utils.file_tool
import os.path
import basic.constants as B
import utils.data_const as D
COMP_FILES = ["DATASTRUCTURE"]
CONFIG_FORMAT = ["yml", "json", "csv"]
COMP_FILES = [D.DDL_FILENAME]
CONFIG_FORMAT = [D.DFILE_TYPE_YML, D.DFILE_TYPE_JSON, D.DFILE_TYPE_CSV]
def getConfigPath(modul, name, subname=""):
"""

utils/conn_tool.py (8 lines changed)

@@ -12,7 +12,7 @@ def getConnection(comp, nr):
job = basic.program.Job.getInstance()
verify = job.getDebugLevel("conn_tool")
conn = {}
if job.conf.confs.get("tools").get("connsrc") == "yml":
if job.conf.confs.get("tools").get("connsrc") == D.DFILE_TYPE_YML:
conn = utils.config_tool.getConfig("tool", "conn")
xtypes = None
if ("types" in conn["env"][comp]):
@@ -26,7 +26,7 @@ def getConnection(comp, nr):
job.m.setFatal("Conn-Tool: Comp not configured " + comp + " " + str(nr))
elif job.conf.confs.get("tools").get("connsrc") == "flaskdb":
pass
elif job.conf.confs.get("tools").get("connsrc") == "csv":
elif job.conf.confs.get("tools").get("connsrc") == D.DFILE_TYPE_CSV:
pass
return None
@@ -37,13 +37,13 @@ def getConnections(comp):
print("getConnections " + comp)
conn = {}
conns = []
if job.conf.confs.get("tools").get("connsrc") == "yml":
if job.conf.confs.get("tools").get("connsrc") == D.DFILE_TYPE_YML:
conn = utils.config_tool.getConfig("tool", "conn")
if not comp in conn["env"]:
job.m.setFatal("Conn-Tool: Comp not configured " + comp)
elif job.conf.confs.get("tools").get("connsrc") == "flaskdb":
pass
elif job.conf.confs.get("tools").get("connsrc") == "csv":
elif job.conf.confs.get("tools").get("connsrc") == D.DFILE_TYPE_CSV:
pass
#print(comp)
#print(conn["env"].keys())

utils/data_const.py (28 lines changed)

@@ -0,0 +1,28 @@
#!/usr/bin/python
"""
constants used for data-functions
"""
DDL_FILENAME = "DATASTRUCTURE"
DDL_FNULLABLE = "nullable"
DFILE_TYPE_YML = "yml"
DFILE_TYPE_JSON = "json"
DFILE_TYPE_CSV = "csv"
DATA_SRC_DIR = "dir"
DATA_SRC_CSV = "csv"
CSV_HEADER_START = ["node", "table", "tabelle"]
CSV_DELIMITER = ";"
CSV_SPECTYPE_DATA = "data"
CSV_SPECTYPE_TREE = "tree"
CSV_SPECTYPE_KEYS = "keys"
CSV_SPECTYPE_CONF = "conf"
CSV_NODETYPE_KEYS = "_keys"
ATTR_SRC_TYPE = "tdtyp"
ATTR_SRC_DATA = "tdsrc"
ATTR_SRC_NAME = "tdname"

utils/db_abstract.py (43 lines changed)

@@ -41,12 +41,9 @@ SPECIAL CASES:
import basic.program
import utils.config_tool
import basic.constants as B
import utils.data_const as D
import os
DEFAULT_DB_PARTITION = "n"
""" attribute if table is partitioned - partitions are parametrized """
DEFAULT_DB_CONN_JAR = "n"
""" attribute for connection-jar-file instead of connection by ip, port """
def getDbAttributes(comp, table):
@@ -60,8 +57,8 @@ def getDbAttributes(comp, table):
B.ATTR_DB_DATABASE: "",
B.ATTR_DB_SCHEMA: "",
B.ATTR_DB_TABNAME: "",
B.ATTR_DB_PARTITION: DEFAULT_DB_PARTITION,
B.ATTR_DB_CONN_JAR: DEFAULT_DB_CONN_JAR
B.ATTR_DB_PARTITION: D.DEFAULT_DB_PARTITION,
B.ATTR_DB_CONN_JAR: D.DEFAULT_DB_CONN_JAR
}
for attr in out.keys():
print(attr)
@@ -113,8 +110,8 @@ class DbFcts():
def xxgetDbAttributes(self, table):
out = {
B.ATTR_DB_TABNAME: "",
B.ATTR_DB_PARTITION: DFLT_DB_PARTITION,
B.ATTR_DB_CONN_JAR: DFLT_DB_CONN_JAR
B.ATTR_DB_PARTITION: D.DEFAULT_DB_PARTITION,
B.ATTR_DB_CONN_JAR: D.DEFAULT_DB_CONN_JAR
}
for attr in out.keys():
print(attr)
@@ -155,7 +152,7 @@ class DbFcts():
def selectRows(self, statement):
""" method to select rows from a database
statement written in sql """
raise Exception("method is not implemented")
raise Exception(B.EXCEPT_NOT_IMPLEMENT)
def deleteTables(self):
""" method to delete rows from a database
@@ -168,17 +165,17 @@ class DbFcts():
def deleteRows(self, table):
""" method to delete rows from a database
statement written in sql """
raise Exception("method is not implemented")
raise Exception(B.EXCEPT_NOT_IMPLEMENT)
def updateRows(self, statement):
""" method to delete rows from a database
statement written in sql """
raise Exception("method is not implemented")
raise Exception(B.EXCEPT_NOT_IMPLEMENT)
def getConnector(self):
""" add-on-method to get the connector
this method should only be called by the class itself """
raise Exception("method is not implemented")
raise Exception(B.EXCEPT_NOT_IMPLEMENT)
def insertTables(self, tdata):
""" method to insert rows into a database
@@ -194,12 +191,12 @@ class DbFcts():
""" method to insert rows into a database
the rows will be interpreted by the ddl of the component
"""
raise Exception("method is not implemented")
raise Exception(B.EXCEPT_NOT_IMPLEMENT)
def execStatement(self, statement):
""" add-on-method to execute the statement
this method should only be called by the class itself """
raise Exception("method is not implemented")
raise Exception(B.EXCEPT_NOT_IMPLEMENT)
def loadDdl(self):
"""" load the DDL for each database-table
@@ -207,7 +204,7 @@ class DbFcts():
job = basic.program.Job.getInstance()
if (B.DATA_NODE_DDL in self.comp.conf):
return
conf = utils.config_tool.getConfig("DATASTRUCTURE", self.comp.name)
conf = utils.config_tool.getConfig(D.DDL_FILENAME, self.comp.name)
self.comp.conf[B.DATA_NODE_DDL] = {}
for k in conf[self.comp.name]:
self.comp.conf[B.DATA_NODE_DDL][k] = conf[self.comp.name][k]
@@ -219,19 +216,19 @@ class DbFcts():
return ""
def getDbValue(self, fo, value):
if len(value.strip()) == 0 and fo["nullable"] == "y":
if len(value.strip()) == 0 and fo[D.DDL_FNULLABLE] == B.SVAL_YES:
return self.getDbNull()
if fo["type"] == "string":
if fo[B.DATA_NODE_TYPE] == B.TYPE_STRING:
return "'"+value.strip()+"'"
elif fo["type"] == "int":
elif fo[B.DATA_NODE_TYPE] == B.TYPE_INT:
return value.strip()
elif fo["type"] == "double":
elif fo[B.DATA_NODE_TYPE] == B.TYPE_DOUBLE:
return self.getDbDouble(value)
elif fo["type"] == "float":
elif fo[B.DATA_NODE_TYPE] == B.TYPE_FLOAT:
return self.getDbFloat(value)
elif fo["type"] == "date":
elif fo[B.DATA_NODE_TYPE] == B.TYPE_DATE:
return self.getDbDate(value)
elif fo["type"] == "time":
elif fo[B.DATA_NODE_TYPE] == B.TYPE_TIME:
return self.getDbTime(value)
def getDbDouble(self, value):
@@ -243,4 +240,4 @@ class DbFcts():
def getDbTime(self, value):
return value
def getDbNull(self):
return "null"
return B.SVAL_NULL
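The getDbValue dispatch expects a DDL field-description keyed by the new constants; a sketch of a call on a concrete DbFcts instance (the field dict and values are illustrative):

import basic.constants as B
import utils.data_const as D

fo = {B.DATA_NODE_TYPE: B.TYPE_STRING, D.DDL_FNULLABLE: B.SVAL_YES}
dbfcts.getDbValue(fo, " hello ")   # -> "'hello'"
dbfcts.getDbValue(fo, "")          # -> B.SVAL_NULL via getDbNull()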

utils/dbcsv_tool.py (2 lines changed)

@@ -36,7 +36,7 @@ class DbFcts(utils.db_abstract.DbFcts):
def updateRows(self, statement):
""" method to delete rows from a database
statement written in sql """
raise Exception("method is not implemented")
raise Exception(B.EXCEPT_NOT_IMPLEMENT)
def insertRows(self, table, rows):
""" method to insert rows into a database

utils/dbmysql_tool.py (4 lines changed)

@@ -49,7 +49,7 @@ class DbFcts(utils.db_abstract.DbFcts):
def updateRows(self, statement):
""" method to delete rows from a database
statement written in sql """
raise Exception("method is not implemented")
raise Exception(B.EXCEPT_NOT_IMPLEMENT)
def insertRows(self, table, rows):
""" method to insert rows into a database
@@ -96,7 +96,7 @@ class DbFcts(utils.db_abstract.DbFcts):
def execStatement(self, comp, conn, statement):
""" add-on-method to execute the statement
this method should only be called by the class itself """
raise Exception("method is not implemented")
raise Exception(B.EXCEPT_NOT_IMPLEMENT)

utils/dbsfile_tool.py (2 lines changed)

@@ -87,7 +87,7 @@ class DbFcts(utils.db_abstract.DbFcts):
""" add-on-method to get the connector
this method should only be called by the class itself """
job = basic.program.Job.getInstance()
attr = self.getDbAttributes("null")
attr = self.getDbAttributes(B.SVAL_NULL)
spark = None
if B.ATTR_DB_CONN_JAR in attr:
spark = pyspark.SparkSession\

utils/dbshive_tool.py (2 lines changed)

@@ -92,7 +92,7 @@ class DbFcts(utils.db_abstract.DbFcts):
""" add-on-method to get the connector
this method should only be called by the class itself """
job = basic.program.Job.getInstance()
attr = self.getDbAttributes("null")
attr = self.getDbAttributes(B.SVAL_NULL)
spark = None
if B.ATTR_DB_CONN_JAR in attr:
connectorJar = os.environ.get(attr[B.ATTR_DB_CONN_JAR])

utils/file_tool.py (10 lines changed)

@@ -170,15 +170,15 @@ def readFileDict(path, msg):
if not os.path.exists(path):
return doc
enc = detectFileEncode(path, msg)
if "yml" in path[-5:]:
if D.DFILE_TYPE_YML in path[-5:]:
with open(path, 'r', encoding=enc) as file:
doc = yaml.full_load(file)
file.close()
elif "json" in path[-5:]:
elif D.DFILE_TYPE_JSON in path[-5:]:
with open(path, 'r', encoding=enc) as file:
doc = json.load(file)
file.close()
elif "csv" in path[-5:]:
elif D.DFILE_TYPE_CSV in path[-5:]:
doc = utils.tdata_tool.getCsvSpec(msg, path, "conf")
return doc
@@ -193,11 +193,11 @@ def writeFileText(msg, path, text, enc="utf-8"):
def writeFileDict(msg, path, dict, enc="utf-8"):
job = basic.program.Job.getInstance()
mkPaths(path, msg)
if "yml" in path[-5:]:
if D.DFILE_TYPE_YML in path[-5:]:
with open(path, 'w', encoding=enc) as file:
doc = yaml.dump(dict, file)
file.close()
elif "json" in path[-5:]:
elif D.DFILE_TYPE_JSON in path[-5:]:
with open(path, 'w', encoding=enc) as file:
doc = json.dumps(dict, indent=4)
file.write(doc)
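The suffix check works on the last five characters of the path; a sketch (the paths are illustrative):

import utils.file_tool

doc = utils.file_tool.readFileDict("conn.yml", job.m)    # "yml" in "n.yml": yaml branch
doc = utils.file_tool.readFileDict("conf.json", job.m)   # "json" in ".json": json branch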

utils/match_const.py (122 lines changed)

@@ -0,0 +1,122 @@
#!/usr/bin/python
"""
constants used for matching-functions
"""
MATCH_SIDE_PREEXPECT = "preexpect"
""" it implies the precondition of the expectation """
MATCH_DICT_PREEXPECT = {
"short": "SV",
"long": "Soll-Vorher",
"filepattern": "rsprecond"
}
MATCH_SIDE_POSTEXPECT = "postexpect"
""" it implies the postcondition of the expectation - it is the expectation"""
MATCH_DICT_POSTEXPECT = {
"short": "SN",
"long": "Soll-Nachher",
"filepattern": "rsprecond"
}
MATCH_SIDE_PREACTUAL = "preactual"
""" it implies the precondition of the actual execution """
MATCH_DICT_PREACTUAL = {
"short": "IV",
"long": "Ist-Vorher",
"filepattern": "rsprecond"
}
MATCH_SIDE_POSTACTUAL = "postactual"
""" it implies the postondition of the actual execution - it is the result """
MATCH_DICT_POSTACTUAL = {
"short": "IN",
"long": "Ist-Nachher",
"filepattern": "rsprecond"
}
MATCH_SIDE_PRESTEP = "prestep"
""" it implies the postcondition of a preceding step of the actual execution - the preceding step must be configured in the component"""
MATCH_DICT_PRESTEP = {
"short": "VS",
"long": "Vorhergehender Schritt (Nachher)",
"filepattern": "rsprecond"
}
MATCH_SIDE_TESTCASE = "testexample"
""" it implies the postcondition of an exemplary testcase - the exemplary testcase must be parametrized """
MATCH_DICT_TESTCASE = {
"short": "VT",
"long": "Vergleichstestfall (Nachher)",
"filepattern": "rsprecond"
}
MATCH_SIDES = [MATCH_SIDE_PREEXPECT, MATCH_SIDE_POSTEXPECT, MATCH_SIDE_PREACTUAL, MATCH_SIDE_POSTACTUAL, MATCH_SIDE_PRESTEP, MATCH_SIDE_TESTCASE]
MATCH_SUCCESS = "success"
""" matches the action between pre- and postcondition of the actual testexecution """
MATCH_PRECOND = "preconditions"
""" matches the preconditions betwenn the required result the the actual testexecution
- just for info if the both executions have the same precondition """
MATCH_POSTCOND = "postconditions"
""" matches the postconditions betwenn the required result the the actual testexecution
- it is the main comparison """
MATCH_PRESTEP = "prestep"
MATCH_TESTEXAMPLE = "testeample"
MATCH_TYPES = [MATCH_PRECOND, MATCH_PRESTEP, MATCH_TESTEXAMPLE, MATCH_SUCCESS, MATCH_POSTCOND]
MATCH = {
MATCH_SIDE_PREEXPECT: MATCH_DICT_PREEXPECT,
MATCH_SIDE_POSTEXPECT: MATCH_DICT_POSTEXPECT,
MATCH_SIDE_PREACTUAL: MATCH_DICT_PREACTUAL,
MATCH_SIDE_POSTACTUAL: MATCH_DICT_POSTACTUAL,
MATCH_SIDE_PRESTEP: MATCH_DICT_PRESTEP,
MATCH_SIDE_TESTCASE: MATCH_DICT_TESTCASE,
MATCH_PRECOND: {
"A": MATCH_SIDE_PREEXPECT,
"B": MATCH_SIDE_PREACTUAL,
"shortA": "SV",
"shortB": "IV",
"longA": "Soll-Vorher",
"longB": "Ist-Vorher",
"mode": "info",
"filename": "01_Vorbedingungen",
"title": "Pruefung Vorbedingung (Soll-Vorher - Ist-Vorher)"
},
MATCH_POSTCOND: {
"A": MATCH_SIDE_POSTEXPECT,
"B": MATCH_SIDE_POSTACTUAL,
"shortA": "SN",
"shortB": "IN",
"longA": "Soll-Nachher",
"longB": "Ist-Nachher",
"mode": "hard",
"filename": "00_Fachabgleich",
"title": "Fachliche Auswertung (Soll-Nachher - Ist-Nachher)"
},
MATCH_SUCCESS: {
"A": MATCH_SIDE_PREACTUAL,
"B": MATCH_SIDE_POSTACTUAL,
"shortA": "IV",
"shortB": "IN",
"longA": "Ist-Vorher",
"longB": "Ist-Nachher",
"mode": "action",
"filename": "04_Ablauf",
"title": "Ablauf-Differenz (Ist-Vorher - Ist-Nachher)"
},
MATCH_PRESTEP: {
"A": MATCH_SIDE_PRESTEP,
"B": MATCH_SIDE_POSTACTUAL,
"shortA": "VN",
"shortB": "IN",
"longA": "Vor-Schritt",
"longB": "Ist-Nachher",
"mode": "action",
"filename": "02_Vorschritt",
"title": "Schritt-Differenz (Vorschritt-Nachher - Ist-Nachher)"
},
MATCH_TESTEXAMPLE: {
"A": MATCH_SIDE_TESTCASE,
"B": MATCH_SIDE_POSTACTUAL,
"shortA": "TN",
"shortB": "IN",
"longA": "Vergleich-Soll",
"longB": "Ist-Nachher",
"mode": "action",
"filename": "03_Vergleichstestfall",
"title": "Vergleichstestfall (Vergleich-Soll - Ist-Nachher)"
},
}
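The MATCH dict is keyed both by side-name and by match-type, so report code can chain the lookups; a sketch using only names from this file:

import utils.match_const as M

cfg = M.MATCH[M.MATCH_POSTCOND]   # {"A": "postexpect", "B": "postactual", "mode": "hard", ...}
sideA = M.MATCH[cfg["A"]]         # the side dict, e.g. sideA["short"] == "SN"
title = cfg["title"]              # "Fachliche Auswertung (Soll-Nachher - Ist-Nachher)"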

utils/match_tool.py (133 lines changed)

@@ -9,108 +9,11 @@ import utils.css_tool
import utils.report_tool
import basic.program
import basic.constants as B
import utils.match_const as M
# ------------------------------------------------------------
"""
"""
MATCH_SIDE_PREEXPECT = "preexpect"
""" it implies the precondition of the expectation """
MATCH_DICT_PREEXPECT = {
"short": "SV",
"long": "Soll-Vorher",
"filepattern": "rsprecond"
}
MATCH_SIDE_POSTEXPECT = "postexpect"
""" it implies the postcondition of the expectation - it is the expectation"""
MATCH_DICT_POSTEXPECT = {
"short": "SN",
"long": "Soll-Nachher",
"filepattern": "rspostcond"
}
MATCH_SIDE_PREACTUAL = "preactual"
""" it implies the precondition of the actual execution """
MATCH_DICT_PREACTUAL = {
"short": "IV",
"long": "Ist-Vorher",
"filepattern": "tcprecond"
}
MATCH_SIDE_POSTACTUAL = "postactual"
""" it implies the postondition of the actual execution - it is the result """
MATCH_DICT_POSTACTUAL = {
"short": "IN",
"long": "Ist-Nachher",
"filepattern": "tcpostcond"
}
MATCH_SIDE_PRESTEP = "preside"
""" it implies the postcondition of a preceding step of the actual execution - the preceding step must be configured in the component"""
MATCH_DICT_PRESTEP = {
"short": "VS",
"long": "Vorhergehender Schritt (Nachher)",
"filepattern": "rspostcond"
}
MATCH_SIDE_TESTCASE = "testexample"
""" it implies the postcondition of an exemplary testcase - the exemplary testcase must be parametrized """
MATCH_DICT_TESTCASE = {
"short": "VT",
"long": "Vergleichstestfall (Nachher)",
"filepattern": "rspostcond"
}
MATCH_SIDES = [MATCH_SIDE_PREEXPECT, MATCH_SIDE_POSTEXPECT, MATCH_SIDE_PREACTUAL, MATCH_SIDE_POSTACTUAL, MATCH_SIDE_PRESTEP, MATCH_SIDE_TESTCASE]
MATCH_SUCCESS = "success"
""" matches the action between pre- and postcondition of the actual testexecution """
MATCH_PRECOND = "preconditions"
""" matches the preconditions betwenn the required result the the actual testexecution
- just for info if the both executions have the same precondition """
MATCH_POSTCOND = "postconditions"
""" matches the postconditions betwenn the required result the the actual testexecution
- it is the main comparison """
MATCH_PRESTEP = "prestep"
MATCH_TESTEXAMPLE = "testeample"
MATCH_TYPES = [MATCH_PRECOND, MATCH_PRESTEP, MATCH_TESTEXAMPLE, MATCH_SUCCESS, MATCH_POSTCOND]
MATCH = {
MATCH_SIDE_PREEXPECT : MATCH_DICT_PREEXPECT,
MATCH_SIDE_POSTEXPECT : MATCH_DICT_POSTEXPECT,
MATCH_SIDE_PREACTUAL : MATCH_DICT_PREACTUAL,
MATCH_SIDE_POSTACTUAL : MATCH_DICT_POSTACTUAL,
MATCH_SIDE_PRESTEP : MATCH_DICT_PRESTEP,
MATCH_SIDE_TESTCASE : MATCH_DICT_TESTCASE,
MATCH_PRECOND: {
"A": MATCH_SIDE_PREEXPECT,
"B": MATCH_SIDE_PREACTUAL,
"mode": "info",
"filename": "01_Vorbedingungen",
"title": "Pruefung Vorbedingung (Soll-Vorher - Ist-Vorher)"
},
MATCH_POSTCOND: {
"A": MATCH_SIDE_POSTEXPECT,
"B": MATCH_SIDE_POSTACTUAL,
"mode": "hard",
"filename": "00_Fachabgleich",
"title": "Fachliche Auswertung (Soll-Nachher - Ist-Nachher)"
},
MATCH_SUCCESS: {
"A": MATCH_SIDE_PREACTUAL,
"B": MATCH_SIDE_POSTACTUAL,
"mode": "action",
"filename": "04_Ablauf",
"title": "Ablauf-Differenz (Ist-Vorher - Ist-Nachher)"
},
MATCH_PRESTEP: {
"A": MATCH_SIDE_PRESTEP,
"B": MATCH_SIDE_POSTACTUAL,
"mode": "action",
"filename": "02_Vorschritt",
"title": "Schritt-Differenz (Vorschritt-Nachher - Ist-Nachher)"
},
MATCH_TESTEXAMPLE: {
"A": MATCH_SIDE_TESTCASE,
"B": MATCH_SIDE_POSTACTUAL,
"mode": "action",
"filename": "03_Vergleichstestfall",
"title": "Schritt-Differenz (Vorschritt-Nachher - Ist-Nachher)"
},
}
class Matching():
def __init__(self, comp):
@@ -136,14 +39,14 @@ class Matching():
:param match: kind of actual match
:return:
"""
sideA = MATCH[match]["A"]
sideB = MATCH[match]["B"]
sideA = M.MATCH[match]["A"]
sideB = M.MATCH[match]["B"]
self.sideA = tdata[sideA]["data"]
self.sideB = tdata[sideB]["data"]
self.matchfiles["A"] = tdata[sideA]["path"]
self.matchfiles["B"] = tdata[sideB]["path"]
self.matchtype = match
self.mode = MATCH[match]["mode"]
self.mode = M.MATCH[match]["mode"]
self.setDiffHeader()
self.report = utils.report_tool.Report.getInstance()
self.resetHits()
@@ -160,13 +63,13 @@ class Matching():
self.cssClass = cssClass
def isHitA(self, key):
return ((key in self.linksA) and (self.linksA[key] != "null"))
return ((key in self.linksA) and (self.linksA[key] != B.SVAL_NULL))
def isHitB(self, key):
return ((key in self.linksB) and (self.linksB[key] != "null"))
return ((key in self.linksB) and (self.linksB[key] != B.SVAL_NULL))
def setHit(self, keyA, keyB):
if (not self.isHitA(keyA)) and (not self.isHitB(keyB)):
if (keyA != "null"): self.linksA[keyA] = keyB
if (keyB != "null"): self.linksB[keyB] = keyA
if (keyA != B.SVAL_NULL): self.linksA[keyA] = keyB
if (keyB != B.SVAL_NULL): self.linksB[keyB] = keyA
return "OK"
raise Exception("one of the links are set")
def setNohit(self, similarity, keyA, keyB):
@@ -190,13 +93,13 @@
job.debug(verify, "getDiffHeader ")
htmltxt = "<!DOCTYPE html>"
htmltxt += "<html><head>"
htmltxt += "<title>"+MATCH[matching.matchtype]["title"]+"</title>"
htmltxt += "<title>"+M.MARCH[matching.matchtype]["title"]+"</title>"
htmltxt += utils.css_tool.getInternalStyle("diffFiles")
htmltxt += "</head>"
htmltxt += "<body>"
htmltxt += "<h1>"+MATCH[matching.matchtype]["title"]+"</h1>"
htmltxt += "<h4>"+MATCH[MATCH[matching.matchtype]["A"]]["long"]+": "+matching.matchfiles["A"]+"</h4>"
htmltxt += "<h4>"+MATCH[MATCH[matching.matchtype]["B"]]["long"]+": "+matching.matchfiles["B"]+"</h4><br>"
htmltxt += "<h1>"+M.MARCH[matching.matchtype]["title"]+"</h1>"
htmltxt += "<h4>"+M.MARCH[M.MARCH[matching.matchtype]["A"]]["long"]+": "+matching.matchfiles["A"]+"</h4>"
htmltxt += "<h4>"+M.MARCH[M.MARCH[matching.matchtype]["B"]]["long"]+": "+matching.matchfiles["B"]+"</h4><br>"
matching.htmltext = htmltxt
def setDiffFooter(self):
@@ -229,13 +132,13 @@ def matchBestfit(matching, path):
if (matching.sideA is not None):
for r in matching.sideA:
k = composeKey("a", i)
matching.setHit(k, "null")
matching.setHit(k, B.SVAL_NULL)
i += 1
i = 0
if (matching.sideB is not None):
for r in matching.sideB:
k = composeKey("b", i)
matching.setHit("null", k)
matching.setHit(B.SVAL_NULL, k)
i += 1
ia = 0
ix = 1
@@ -373,7 +276,7 @@ def getEvaluation(matching, type, acceptance, sideA, sideB):
result = "test"
if match == "99": return ["MATCH", "novalue", "novalue", "novalue", "novalue"]
if acceptance == "ignore": result = "ignore"
if (matching.matchtype == MATCH_POSTCOND) and (result == "test"):
if (matching.matchtype == M.MATCH_POSTCOND) and (result == "test"):
result = "hard"
classA = "diffA"
classB = "diffB"
@@ -489,7 +392,7 @@ def markRow(matching, header, row, side):
cssClass = res[2]
text += "<td "+utils.css_tool.getInlineStyle("diffFiles", cssClass)+">"+val+"</td>"
text = "<tr><td "+utils.css_tool.getInlineStyle("diffFiles", cssClass)+">" \
+ MATCH[MATCH[matching.matchtype][side]]["short"] + "</td>"+text+"</tr>"
+ M.MATCH[M.MATCH[matching.matchtype][side]]["short"] + "</td>"+text+"</tr>"
matching.difftext += text
return text
@@ -544,8 +447,8 @@ def compareRow(matching, header, rA, rB):
matching.setCssClass("result1")
if allident:
return "<tr><td/>"+textA+"</tr>"
text = "<tr><td>"+MATCH[MATCH[matching.matchtype]["A"]]["short"]+"</td>"+textA+"</tr>"
text += "<tr><td>"+MATCH[matching.matchtype]["shortB"]+"</td>"+textB+"</tr>"
text = "<tr><td>"+M.MARCH[M.MARCH[matching.matchtype]["A"]]["short"]+"</td>"+textA+"</tr>"
text += "<tr><td>"+M.MARCH[matching.matchtype]["shortB"]+"</td>"+textB+"</tr>"
matching.difftext += text
return text
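Usage stays as in test/test_compare.py above: build a Matching for a component, then select the sides via a match-type constant from the new module; a sketch:

import utils.match_tool
import utils.match_const as M

matching = utils.match_tool.Matching(comp)
matching.setData(tdata, M.MATCH_POSTCOND)   # picks sides A/B from M.MATCH[M.MATCH_POSTCOND]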

utils/report_tool.py (2 lines changed)

@@ -24,7 +24,7 @@ table0 | | each component | each testcase x each component
import os
import re
import basic.program
import utils.match_tool as M
import utils.match_const as M
import basic.constants as B
import utils.css_tool
import utils.path_tool

utils/tdata_tool.py (86 lines changed)

@@ -25,34 +25,22 @@ import os.path
import basic.program
import utils.file_tool
import basic.constants as B
import utils.data_const as D
DATA_SRC_DIR = "dir"
DATA_SRC_CSV = "csv"
CSV_HEADER_START = ["node", "table", "tabelle"]
CSV_DELIMITER = ";"
CSV_SPECTYPE_DATA = "data"
CSV_SPECTYPE_TREE = "tree"
CSV_SPECTYPE_KEYS = "keys"
CSV_SPECTYPE_CONF = "conf"
CSV_NODETYPE_KEYS = "_keys"
ATTR_SRC_TYPE = "tdtyp"
ATTR_SRC_DATA = "tdsrc"
ATTR_SRC_NAME = "tdname"
TOOL_NAME = "tdata_tool"
""" name of the tool in order to switch debug-info on """
def getTdataAttr():
job = basic.program.Job.getInstance()
out = {} #
out[ATTR_SRC_TYPE] = DATA_SRC_DIR
out[D.ATTR_SRC_TYPE] = D.DATA_SRC_DIR
print("---getTdataAttr")
print(vars(job.par))
if hasattr(job.par, B.PAR_TESTCASE):
out[ATTR_SRC_NAME] = getattr(job.par, B.PAR_TESTCASE)
out[D.ATTR_SRC_NAME] = getattr(job.par, B.PAR_TESTCASE)
elif hasattr(job.par, B.PAR_TESTSUITE):
out[ATTR_SRC_NAME] = getattr(job.par, B.PAR_TESTSUITE)
for p in [ATTR_SRC_TYPE, ATTR_SRC_DATA, ATTR_SRC_NAME]:
out[D.ATTR_SRC_NAME] = getattr(job.par, B.PAR_TESTSUITE)
for p in [D.ATTR_SRC_TYPE, D.ATTR_SRC_DATA, D.ATTR_SRC_NAME]:
# out[p] = ""
if hasattr(job.par, p):
out[p] = getattr(job.par, p)
@@ -73,24 +61,24 @@ def getTestdata():
#criteria = getattr(job.par, "tdname")
tdata = getTdataAttr() # {"reftyp": reftyp, "source": source, "criteria": criteria}
print(tdata)
if tdata[ATTR_SRC_TYPE] == "flaskdb":
if tdata[D.ATTR_SRC_TYPE] == "flaskdb":
# read data-structure with sourcename
# connect to source
# select with all data with datastructure
job.m.setInfo("Test-Data readed from " + tdata[ATTR_SRC_TYPE] + " for " + tdata[ATTR_SRC_NAME])
elif tdata[ATTR_SRC_TYPE] == DATA_SRC_CSV:
job.m.setInfo("Test-Data readed from " + tdata[D.ATTR_SRC_TYPE] + " for " + tdata[D.ATTR_SRC_NAME])
elif tdata[D.ATTR_SRC_TYPE] == D.DATA_SRC_CSV:
# read file in testdata
job.m.logInfo("Test-Data readed from " + tdata[ATTR_SRC_TYPE] + " for " + tdata[ATTR_SRC_NAME])
elif tdata[ATTR_SRC_TYPE] == DATA_SRC_DIR:
filename = os.path.join(job.conf.getJobConf(B.SUBJECT_PATH+":"+B.ATTR_PATH_TDATA), tdata[ATTR_SRC_NAME], "testspec.csv")
data = getCsvSpec(job.m, filename, CSV_SPECTYPE_DATA)
job.m.logInfo("Test-Data readed from " + tdata[D.ATTR_SRC_TYPE] + " for " + tdata[D.ATTR_SRC_NAME])
elif tdata[D.ATTR_SRC_TYPE] == D.DATA_SRC_DIR:
filename = os.path.join(job.conf.getJobConf(B.SUBJECT_PATH+":"+B.ATTR_PATH_TDATA), tdata[D.ATTR_SRC_NAME], "testspec.csv")
data = getCsvSpec(job.m, filename, D.CSV_SPECTYPE_DATA)
for k in data:
tdata[k] = data[k]
if (k == "option"):
for p in data[k]:
setattr(job.par, p, data[k][p])
else:
job.m.setFatal("test-Data: reftyp " + tdata[ATTR_SRC_TYPE] + " is not implemented")
job.m.setFatal("test-Data: reftyp " + tdata[D.ATTR_SRC_TYPE] + " is not implemented")
return tdata
def getCsvSpec(msg, filename, type):
@@ -120,9 +108,9 @@ def getCsvSpec(msg, filename, type):
tableDict = {}
for l in lines:
print("lines "+l)
fields = l.split(CSV_DELIMITER)
fields = l.split(D.CSV_DELIMITER)
# check empty line, comment
if (len(l.strip().replace(CSV_DELIMITER,"")) < 1):
if (len(l.strip().replace(D.CSV_DELIMITER,"")) < 1):
status = "start"
continue
if (fields[0][0:1] == "#"):
@@ -141,13 +129,13 @@ def getCsvSpec(msg, filename, type):
a = fields[3].split(",")
for arg in a:
b = arg.split(":")
step[B.ATTR_STEP_ARGS][b[0]] = b[1]
step[B.ATTR_STEP_ARGS][b[0]] = b[1]
data[B.DATA_NODE_STEPS].append(step)
continue
elif (a[0].lower() == "option"):
data[a[0]][a[1]] = fields[1]
continue
elif (a[0].lower() in CSV_HEADER_START):
elif (a[0].lower() in D.CSV_HEADER_START):
# create deep structure a_0 ... a_n
print("tdata 136 CSV_HEADER_START "+str(len(a)))
h = a
@@ -161,19 +149,19 @@ def getCsvSpec(msg, filename, type):
break
header.append(f)
tableDict[B.DATA_NODE_HEADER] = header
if type == CSV_SPECTYPE_TREE:
if type == D.CSV_SPECTYPE_TREE:
tableDict[B.DATA_NODE_DATA] = {}
elif type == CSV_SPECTYPE_KEYS:
tableDict[CSV_NODETYPE_KEYS] = {}
elif type == CSV_SPECTYPE_CONF:
elif type == D.CSV_SPECTYPE_KEYS:
tableDict[D.CSV_NODETYPE_KEYS] = {}
elif type == D.CSV_SPECTYPE_CONF:
tableDict = {}
headerFields = []
else:
tableDict[B.DATA_NODE_DATA] = []
setTabContent(msg, data, tableDict, h)
status = CSV_SPECTYPE_DATA
status = D.CSV_SPECTYPE_DATA
continue
elif (status == CSV_SPECTYPE_DATA):
elif (status == D.CSV_SPECTYPE_DATA):
# check A-col for substructure
# fill data
tableDict = getTabContent(msg, data, h)
@@ -182,20 +170,20 @@ def getCsvSpec(msg, filename, type):
# case-differentiation DATA or TREE
for f in header:
row[f] = fields[i]
if type == CSV_SPECTYPE_TREE:
if type == D.CSV_SPECTYPE_TREE:
tableDict[B.DATA_NODE_DATA][f] = fields[i]
i += 1
if type == CSV_SPECTYPE_DATA:
if type == D.CSV_SPECTYPE_DATA:
tableDict[B.DATA_NODE_DATA].append(row)
elif type == CSV_SPECTYPE_KEYS:
tableDict[CSV_NODETYPE_KEYS][fields[1]] = row
elif type == CSV_SPECTYPE_CONF:
elif type == D.CSV_SPECTYPE_KEYS:
tableDict[D.CSV_NODETYPE_KEYS][fields[1]] = row
elif type == D.CSV_SPECTYPE_CONF:
tableDict[fields[1]] = row
headerFields.append(fields[1])
setTabContent(msg, data, tableDict, h)
if (status in [CSV_SPECTYPE_DATA, CSV_SPECTYPE_KEYS]):
if (status in [D.CSV_SPECTYPE_DATA, D.CSV_SPECTYPE_KEYS]):
tableDict = getTabContent(msg, data, h)
if type == CSV_SPECTYPE_DATA:
if type == D.CSV_SPECTYPE_DATA:
tableDict[B.DATA_NODE_HEADER] = headerFields
setTabContent(msg, data, tableDict, h)
print("return getCsvSpec "+str(data))
@@ -231,7 +219,7 @@ def getTabContent(msg, data, path):
def readCsv(msg, filename, comp):
job = basic.program.Job.getInstance()
verify = -1+job.getDebugLevel("tdata_tool")
verify = -1+job.getDebugLevel(TOOL_NAME)
job.debug(verify, "readCsv " + filename)
fields = []
nodes = []
@@ -249,7 +237,7 @@ def readCsv(msg, filename, comp):
job.debug(verify, str(state) + " line " + line + " :" + str(len(fields)) + ": " + str(fields))
if len(testline) < 2 and state < 1:
state = 0
elif fields[0].lower() in CSV_HEADER_START:
elif fields[0].lower() in D.CSV_HEADER_START:
state = 2
columns = []
cnt = len(fields)
@@ -304,14 +292,14 @@ def setSubnode(i, nodes, data, tree):
def getDataStructure(comp):
# gets data-structure from the yml in the component-folder
job = basic.program.Job.getInstance()
verify = -1+job.getDebugLevel("tdata_tool")
verify = -1+job.getDebugLevel(TOOL_NAME)
job.debug(verify, "getDataStructure " + comp)
def normalizeDataRow(dstruct, xpathtupel, row, referencedate):
# normalize data of the row if necessary
# raw-value is saved as new field with _raw as suffix
job = basic.program.Job.getInstance()
verify = -1+job.getDebugLevel("tdata_tool")
verify = -1+job.getDebugLevel(TOOL_NAME)
job.debug(verify, "calcDataRow " + row)
def writeCsvData(filename, tdata, comp):
@@ -323,7 +311,7 @@ def writeCsvData(filename, tdata, comp):
:return:
"""
job = basic.program.Job.getInstance()
verify = -1+job.getDebugLevel("tdata_tool")
verify = -1+job.getDebugLevel(TOOL_NAME)
job.debug(verify, "writeDataTable " + str(comp))
text = "table"
for f in tdata[B.DATA_NODE_HEADER]:
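A sketch of how the spec-reader is driven (the path is illustrative; the type constant is the one getTestdata passes above):

import utils.data_const as D
import utils.tdata_tool

data = utils.tdata_tool.getCsvSpec(job.m, "/path/to/testspec.csv", D.CSV_SPECTYPE_DATA)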
