Browse Source

buildCsvSpec()

master
Ulrich Carmesin 2 years ago
parent
commit
e30cd5bd95
  1. 19
      basic/step.py
  2. 47
      utils/data_const.py
  3. 6
      utils/i18n_tool.py
  4. 43
      utils/tdata_tool.py

19
basic/step.py

@ -14,6 +14,7 @@ b) execute specific test-entity in the test-suite-execution
"""
import basic.constants as B
import utils.data_const as D
import utils.i18n_tool
LIST_ARGS = [
"start", # for starting the specified main-program
@ -34,6 +35,11 @@ class Step:
self.execStep = ""
self.args = {}
def getStepText(self, job):
    """
    Serialize this step into one CSV line:
    component;exec-step;ref-line[;key:value ...] terminated by a newline.
    :param job: active job context (kept for interface symmetry, not read here)
    :return: the CSV line for this step
    """
    # str() also guards refLine and the arg values: execStep was already
    # wrapped, but a non-string refLine or arg value (e.g. an int parsed
    # from the spec) would raise TypeError on "+" concatenation
    text = self.comp + D.CSV_DELIMITER + str(self.execStep) + D.CSV_DELIMITER + str(self.refLine)
    for k in self.args:
        text += D.CSV_DELIMITER + k + ":" + str(self.args[k])
    return text + "\n"
def parseOldStep(job, fields):
step = {}
@ -63,7 +69,7 @@ def parseOldStep(job, fields):
return step
def parseStep(job, fields):
step = Step
step = Step()
step.comp = fields[D.STEP_COMP_I]
step.execStep = fields[D.STEP_EXECNR_I]
step.refLine = fields[D.STEP_REFNR_I]
@ -89,3 +95,14 @@ def parseStep(job, fields):
setattr(step, b[0], b[1])
# data[B.DATA_NODE_STEPS].append(step)
return step
def getStepHeader(job):
    """
    Build the commented CSV header line that precedes the step block,
    with localized column titles resolved through the i18n-tool.
    :param job: active job context, passed to the i18n lookup
    :return: header line starting with "# " and ending with "..;;;\n"
    """
    text = "# "
    text += utils.i18n_tool.I18n.getInstance().getText(f"{D.CSV_BLOCK_STEP=}", job)
    # use the shared delimiter constant instead of a literal ";" so the
    # header stays consistent with Step.getStepText()
    text += D.CSV_DELIMITER + utils.i18n_tool.I18n.getInstance().getText(f"{D.STEP_ATTR_COMP=}", job)
    text += D.CSV_DELIMITER + utils.i18n_tool.I18n.getInstance().getText(f"{D.STEP_ATTR_EXECNR=}", job)
    text += D.CSV_DELIMITER + utils.i18n_tool.I18n.getInstance().getText(f"{D.STEP_ATTR_REFNR=}", job)
    text += D.CSV_DELIMITER + utils.i18n_tool.I18n.getInstance().getText(f"{D.STEP_ATTR_ARGS=}", job)
    return text + D.CSV_DELIMITER + "..;;;\n"

47
utils/data_const.py

@ -2,6 +2,7 @@
"""
constants for used for api-functions
"""
import basic.constants as B
DDL_FILENAME = "DATASTRUCTURE"
@ -23,15 +24,44 @@ DDL_TYPE = "type"
DFILE_TYPE_YML = "yml"
DFILE_TYPE_JSON = "json"
DFILE_TYPE_CSV = "csv"
DFILE_TESTCASE_NAME = "testspec"
DFILE_TESTSUITE_NAME = "testsuite"
DFILE_TABLE_PREFIX = "table_"
LIST_DFNAME_ATTR = [DFILE_TESTCASE_NAME, DFILE_TESTSUITE_NAME, DFILE_TABLE_PREFIX]
LIST_DFNAME_CONST = ["DFILE_TESTCASE_NAME", "DFILE_TESTSUITE_NAME", "DFILE_TABLE_PREFIX"]
DATA_SRC_DIR = "dir"
DATA_SRC_CSV = "csv"
DATA_ATTR_COUNT = "_count"
""" statistical information of data-count """
DATA_ATTR_DATE = "_date"
""" reference-date for computing the actual date in relation to specification or expectation """
DATA_ATTR_COMP = "_comp"
""" reference to using componente with their object """
DATA_ATTR_CHAR = "_char"
""" character of the data in order to delete it ión initialization """
DATA_ATTR_ALIAS = "_alias"
LIST_DATA_ATTR = [DATA_ATTR_COUNT, DATA_ATTR_DATE, DATA_ATTR_CHAR, DATA_ATTR_COMP, DATA_ATTR_ALIAS]
LIST_ATTR_CONST = ["DATA_ATTR_COUNT", "DATA_ATTR_DATE", "DATA_ATTR_CHAR", "DATA_ATTR_COMP", "DATA_ATTR_ALIAS"]
HEAD_ATTR_DESCR = "decription"
HEAD_ATTR_TARGET = "target"
HEAD_ATTR_USECASE = "usecase"
HEAD_ATTR_UCID = "usecase-id"
HEAD_ATTR_STORY = "story"
HEAD_ATTR_STORYID = "storyid-id"
HEAD_ATTR_APPS = B.SUBJECT_APPS
HEAD_ATTR_DEPR = "deprecated"
LIST_HEAD_ATTR = [HEAD_ATTR_DESCR, HEAD_ATTR_TARGET, HEAD_ATTR_USECASE, HEAD_ATTR_UCID,
HEAD_ATTR_STORY, HEAD_ATTR_STORYID, HEAD_ATTR_APPS, HEAD_ATTR_DEPR]
LIST_HEAD_CONST = ["HEAD_ATTR_DESCR", "HEAD_ATTR_TARGET", "HEAD_ATTR_USECASE", "HEAD_ATTR_UCID",
"HEAD_ATTR_STORY", "HEAD_ATTR_STORYID", "HEAD_ATTR_APPS", "HEAD_ATTR_DEPR"]
CSV_HEADER_START = ["node", "table", "tabelle"]
CSV_DELIMITER = ";"
INTERNAL_DELIMITER = "||"
"""
internal structure of testdata
@ -42,13 +72,26 @@ CSV_SPECTYPE_KEYS = "keys"
CSV_SPECTYPE_CONF = "conf"
CSV_NODETYPE_KEYS = "_keys"
# NOTE(review): CSV_BLOCK_OPTION and CSV_BLOCK_STEP are assigned twice —
# the later B.* assignments win; the string literals below look like
# superseded definitions left in by the commit, verify and remove
CSV_BLOCK_OPTION = "option"
CSV_BLOCK_STEP = "step"
# block identifiers of a csv specification file
CSV_BLOCK_HEAD = "_head"
CSV_BLOCK_OPTION = B.DATA_NODE_OPTION
CSV_BLOCK_STEP = B.DATA_NODE_STEPS
CSV_BLOCK_TABLES = B.DATA_NODE_TABLES
CSV_BLOCK_IMPORT = "_import"
LIST_CSV_BLOCKS = [CSV_BLOCK_HEAD, CSV_BLOCK_OPTION, CSV_BLOCK_STEP, CSV_BLOCK_TABLES, CSV_BLOCK_IMPORT]
LIST_BLOCK_CONST = ["CSV_BLOCK_HEAD", "CSV_BLOCK_OPTION", "CSV_BLOCK_STEP", "CSV_BLOCK_TABLES", "CSV_BLOCK_IMPORT"]
# field indices inside a step line of the csv file
STEP_COMP_I = 1
STEP_EXECNR_I = 2
STEP_REFNR_I = 3
# NOTE(review): STEP_ARGS_I and STEP_LIST_I share index 4 — presumably the
# argument list starts at the same position; confirm against parseStep()
STEP_ARGS_I = 4
STEP_LIST_I = 4
# attribute names of a Step object
STEP_ATTR_COMP = "component"
STEP_ATTR_EXECNR = "exec-step"
STEP_ATTR_REFNR = "reference-nr"
STEP_ATTR_ARGS = "arguments"
LIST_STEP_ATTR = [STEP_ATTR_COMP, STEP_ATTR_EXECNR, STEP_ATTR_REFNR, STEP_ATTR_ARGS]
LIST_STEP_CONST = ["STEP_ATTR_COMP", "STEP_ATTR_EXECNR", "STEP_ATTR_REFNR", "STEP_ATTR_ARGS"]
# message prefix for parse errors ("malformated" kept as-is: runtime string)
EXCP_MALFORMAT = "malformated line: "
ATTR_SRC_TYPE = "tdtyp"

6
utils/i18n_tool.py

@ -46,10 +46,12 @@ class I18n:
"""
if job is None:
job = basic.program.Job.getInstance()
if "language" in job.conf.confs:
language = job.conf.confs["language"]
else:
language = "en"
if language not in self.cache:
raise Exception(EXP_KEY_MISSING, (key))
print(key)
out = self.extractText(key)
key = self.extractKey(key)
if key in self.cache[language]:
@ -80,9 +82,7 @@ class I18n:
else:
y = x
key = y
print("= in key " + y)
else:
print("!= in key")
y = key
return key

43
utils/tdata_tool.py

@ -5,6 +5,7 @@
# Source : gitea.ucarmesin.de
# ---------------------------------------------------------------------------------------------------------
import os.path
import inspect
import basic.program
import utils.config_tool
import utils.file_tool
@ -230,7 +231,7 @@ def parseCsvSpec(msg, lines, ttype, tdata, job=None):
for k in tdata:
if B.DATA_NODE_DATA in tdata[k]:
tdata[k].pop(B.DATA_NODE_DATA)
if B.DATA_NODE_TABLES in tdata[B.DATA_NODE_TABLES]:
if B.DATA_NODE_TABLES in tdata and B.DATA_NODE_TABLES in tdata[B.DATA_NODE_TABLES]:
for k in tdata[B.DATA_NODE_TABLES][B.DATA_NODE_TABLES]:
if k in tdata[B.DATA_NODE_TABLES]:
print("Error")
@ -371,17 +372,51 @@ def buildCsvData(tdata, table, job=None):
text += k+";"+str(tdata[k])+"\n"
header = utils.i18n_tool.I18n.getInstance().getText(f"{B.DATA_NODE_TABLES=}", job)+":"+table
for f in tdata[B.DATA_NODE_HEADER]:
header += ";"+f
header += D.CSV_DELIMITER+f
text += header + "\n"
i = 0
for r in tdata[B.DATA_NODE_DATA]:
row = ""
if B.ATTR_DATA_COMP in r:
for k in r[B.ATTR_DATA_COMP]:
row += ","+k+":"+r[B.ATTR_DATA_COMP][k]
row = row[1:]
i += 1
for f in tdata[B.DATA_NODE_HEADER]:
if f in r:
row += ";"+str(r[f])
row += D.CSV_DELIMITER+str(r[f])
else:
row += ";"
row += D.CSV_DELIMITER
text += row
text += "\n"
return text
def buildCsvSpec(tdata, job=None):
    """
    Serialize a parsed testdata specification back into csv text, block by
    block: head attributes, options, steps and tables, each section closed
    by a comment line of empty fields.
    :param tdata: dict with the optional keys D.CSV_BLOCK_HEAD,
        D.CSV_BLOCK_OPTION, D.CSV_BLOCK_STEP, D.CSV_BLOCK_TABLES
    :param job: active job context for the i18n lookups
    :return: the csv text of the specification
    """
    text = ""
    # fixed: the guard tested D.CSV_BLOCK_IMPORT while the loop reads
    # tdata[D.CSV_BLOCK_HEAD] -> KeyError whenever _import was present
    # without _head, and the head block was silently skipped otherwise
    if D.CSV_BLOCK_HEAD in tdata:
        for k in tdata[D.CSV_BLOCK_HEAD]:
            text += utils.i18n_tool.I18n.getInstance().getText(f"{D.CSV_BLOCK_HEAD=}", job)
            text += ":"+k+D.CSV_DELIMITER+tdata[D.CSV_BLOCK_HEAD][k]+"\n"
        text += "# option:key ;values;..;;;;\n"
    if D.CSV_BLOCK_OPTION in tdata:
        for k in tdata[D.CSV_BLOCK_OPTION]:
            text += utils.i18n_tool.I18n.getInstance().getText(f"{D.CSV_BLOCK_OPTION=}", job)
            text += ":" + k + D.CSV_DELIMITER + getHeadArgs(tdata[D.CSV_BLOCK_OPTION][k], job)+"\n"
        text += "#;;;;;;\n"
    if D.CSV_BLOCK_STEP in tdata:
        text += basic.step.getStepHeader(job)
        # enumerate replaces the manual i = 1 / i += 1 counter
        for i, step in enumerate(tdata[D.CSV_BLOCK_STEP], start=1):
            text += utils.i18n_tool.I18n.getInstance().getText(f"{D.CSV_BLOCK_STEP=}", job) + ":" + str(i)
            text += D.CSV_DELIMITER + step.getStepText(job)
        text += "#;;;;;;\n"
    if D.CSV_BLOCK_TABLES in tdata:
        for k in tdata[D.CSV_BLOCK_TABLES]:
            text += buildCsvData(tdata[D.CSV_BLOCK_TABLES][k], k, job)
        text += "#;;;;;;\n"
    return text
def getHeadArgs(value, job):
    """
    Translate an internally stored argument string into its csv form by
    swapping the internal delimiter for the csv delimiter.
    :param value: argument string joined with D.INTERNAL_DELIMITER
    :param job: active job context (not read here)
    :return: the value with csv delimiters
    """
    csv_value = value.replace(D.INTERNAL_DELIMITER, D.CSV_DELIMITER)
    return csv_value

Loading…
Cancel
Save