
on refactoring: file_type

refactor
Ulrich 1 year ago
parent commit 44d08444f7
  1. basic/Testserver.py (8)
  2. basic/constants.py (4)
  3. model/catalog.py (2)
  4. model/entity.py (30)
  5. model/factory.py (25)
  6. model/project.py (2)
  7. model/testcase.csv (2)
  8. model/testcase.py (81)
  9. model/user.csv (3)
  10. model/user.py (31)
  11. test/test_10job.py (4)
  12. test/test_15user.py (10)
  13. test/test_27testcase.py (36)
  14. test/test_31filecsv.py (3)
  15. tools/config_tool.py (20)
  16. tools/db_abstract.py (33)
  17. tools/dbmysql_tool.py (13)
  18. tools/file_tool.py (12)
  19. tools/file_type.py (316)
  20. tools/filecsv_fcts.py (37)

basic/Testserver.py (8)

@@ -24,6 +24,8 @@ class Testserver():
"""
tables = {}
__instance = None
def __init__(self, job):
"""
collect all resources into this object
@@ -40,9 +42,15 @@ class Testserver():
self.conf[B.TOPIC_CONN][B.TOPIC_NODE_DB][attr] = job.conf[B.TOPIC_NODE_DB][attr]
# TODO what has to be loaded initially on the Testserver?
self.model = {}
Testserver.__instance = self
for s in B.LIST_SUBJECTS:
self.model[tools.data_tool.getSingularKeyword(s)] = model.factory.get_entity_object(job, s, {})
@staticmethod
def getInstance(job):
if Testserver.__instance is None:
return Testserver(job)
return Testserver.__instance
def restInit(self):
if not B.DATA_NODE_DDL in self.conf:
self.conf[B.DATA_NODE_DDL] = {}
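
A minimal usage sketch of the new singleton accessor, assuming getInstance hands back the cached instance once one exists (as fixed above):

ts1 = basic.Testserver.Testserver.getInstance(job)  # first call builds the instance
ts2 = basic.Testserver.Testserver.getInstance(job)  # later calls reuse it
assert ts1 is ts2
# ts1.model maps each singular subject keyword to an entity object from model.factory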

basic/constants.py (4)

@@ -161,6 +161,8 @@ SUBJECT_COMP = "component"
SUBJECT_COMPS = SUBJECT_COMP + "s"
SUBJECT_USECASE = "usecase"
SUBJECT_USECASES = SUBJECT_USECASE + "s"
SUBJECT_USER = "user"
SUBJECT_USERS = SUBJECT_USER + "s"
SUBJECT_REL = "release"
SUBJECT_RELS = SUBJECT_REL + "s"
SUBJECT_TESTCASE = "testcase"
@@ -200,7 +202,7 @@ SUBJECT_DATATABLES = SUBJECT_DATATABLE + "s"
# List of persistent models
LIST_SUBJECTS = [SUBJECT_PROJECTS, SUBJECT_APPS, SUBJECT_USECASES, SUBJECT_VARIANTS, SUBJECT_RELS, SUBJECT_STORIES,
SUBJECT_TESTPLANS, SUBJECT_TESTSUITES, SUBJECT_TESTCASES, SUBJECT_STEPS, SUBJECT_DATATABLES,
SUBJECT_ENVIRONMENTS, SUBJECT_COMPS, SUBJECT_ARTIFACTS]
SUBJECT_ENVIRONMENTS, SUBJECT_COMPS, SUBJECT_ARTIFACTS, "storys"]
# --Topic -----------------------------------------------------
# _____ _

model/catalog.py (2)

@@ -105,7 +105,7 @@ class Catalog:
data = tools.file_tool.read_file_dict(job, pathname, msg, D.CSV_SPECTYPE_CTLG)
if hasattr(job, "m"):
job.m.debug(12, "domain " + domain + " read from " + pathname)
self.catalog[domain] = data[domain][B.DATA_NODE_KEYS]
self.catalog[domain] = data[B.DATA_NODE_KEYS]
return data
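
This works because DatatypeCatalog.rebuild_data in the new tools/file_type.py (below) already pops the surrounding tables- and name-nodes, so the keys node now sits at the top level of the returned dict. Roughly:

# before this commit: data == {domain: {B.DATA_NODE_KEYS: {...}, ...}}
# after this commit:  data == {B.DATA_NODE_KEYS: {...}, B.DATA_NODE_HEADER: [...], ...}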

model/entity.py (30)

@@ -2,7 +2,7 @@ import getpass
import os
import re
import basic.toolHandling
import model.factory
#import model.factory
# import model.entity
import tools.data_const as D
import tools.path_const as P
@@ -107,7 +107,7 @@ class Entity:
if storage == STORAGE_DB:
entity = self.select_entity(job, k)
elif storage == STORAGE_FILE:
entity = self.read_entity(job, k, B.ATTR_INST_TESTSERVER)
entity = self.read_entity(job, k)
else:
entity = self.read_entity(job, k)
entities.append(entity)
@@ -169,6 +169,16 @@ class Entity:
"""
raise Exception(B.EXCEPT_NOT_IMPLEMENT)
def check_tdata(self, job, tdata: dict) -> dict:
"""
it checks the data for the specific form
:param job:
:param tdata:
:return:
"""
raise Exception(B.EXCEPT_NOT_IMPLEMENT)
def select_entity(self, job, name):
"""
reads the entity from the database
@@ -258,7 +268,7 @@ class Entity:
:param name:
:return:
"""
config = tools.config_tool.getConfig(job, module, subject, ttype)
config = tools.config_tool.getConfig(job, module, subject, ttype=ttype)
oldConfig = config
if config is not None:
if subject not in config:
@@ -284,6 +294,17 @@ class Entity:
return config
raise Exception("keine Config zu "+name)
@staticmethod
def set_subtables(job, tdata: dict) -> dict:
"""
reads the subtable tag from the csv file and converts the subtables into workable entity elements
:param job:
:param tdata:
:return:
"""
raise Exception("not implemented ")
@staticmethod
def getDirlist(job, path, ext) -> list:
outList = []
@@ -320,6 +341,7 @@ class Entity:
:return:
"""
""" 2023-05 """
import model.factory
verify = False
if not job is None:
self.job = job
@@ -337,6 +359,8 @@ class Entity:
setattr(self, tools.data_tool.getSingularKeyword(k), config[rootname][key])
setattr(self, D.FIELD_NAME, rootname)
for k in subjects:
# tables: { person: { _header: [] , _data: {} } }
#
if k in [B.DATA_NODE_DATA, B.DATA_NODE_HEADER, B.DATA_NODE_FIELDS, B.DATA_NODE_ROW]:
continue
objects = {}

model/factory.py (25)

@@ -1,33 +1,36 @@
import model.entity
import basic.constants as B
import basic.Testserver
def get_entity_object(job, name, args):
if name in B.SUBJECT_STEPS:
if name in [B.SUBJECT_STEPS, B.SUBJECT_STEP]:
entity = getStep(job)
elif name in B.SUBJECT_STORIES:
elif name in [B.SUBJECT_STORIES, B.SUBJECT_STORY]:
entity = getStory(job)
elif name in B.SUBJECT_VARIANTS:
elif name in [B.SUBJECT_VARIANTS, B.SUBJECT_VARIANT]:
entity = getVariant(job)
elif name in B.SUBJECT_DATATABLES:
elif name in [B.SUBJECT_DATATABLES, B.SUBJECT_DATATABLE]:
entity = getDatatable(job)
elif name in B.SUBJECT_USECASES:
elif name in [B.SUBJECT_USECASES, B.SUBJECT_USECASE]:
entity = getUsecase(job)
elif name in B.SUBJECT_PROJECTS:
elif name in [B.SUBJECT_PROJECTS, B.SUBJECT_PROJECT]:
entity = getProject(job)
elif name in B.SUBJECT_APPS:
elif name in [B.SUBJECT_APPS, B.SUBJECT_APP]:
entity = getApplication(job)
elif name in B.SUBJECT_COMPS:
elif name in [B.SUBJECT_COMPS, B.SUBJECT_COMP]:
entity = getComponent(job)
elif name in B.SUBJECT_TESTCASES:
elif name in [B.SUBJECT_TESTCASES, B.SUBJECT_TESTCASE]:
entity = getTestcase(job)
elif name in B.SUBJECT_TESTSUITES:
elif name in [B.SUBJECT_TESTSUITES]:
entity = getTestsuite(job)
elif name in B.SUBJECT_TESTPLAN:
elif name in [B.SUBJECT_TESTPLANS, B.SUBJECT_TESTPLAN]:
entity = getTestplan(job)
else:
return None
entity.setAttributes(job, args, name, entity.getFieldList(), entity.getNodeList(), entity.getSubtableList())
#testserver = basic.Testserver.getInstance(job)
return entity
def getEnvironment(job=None):
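
With the widened membership tests the factory now resolves singular and plural subject keywords alike; a small sketch of the intended behaviour (constants as in basic/constants.py):

tc = model.factory.get_entity_object(job, B.SUBJECT_TESTCASE, {})    # "testcase"
tcs = model.factory.get_entity_object(job, B.SUBJECT_TESTCASES, {})  # "testcases"
# both calls return a Testcase entity; unknown names still fall through to None
assert model.factory.get_entity_object(job, "nosuchsubject", {}) is None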

model/project.py (2)

@@ -255,7 +255,7 @@ class Project(model.entity.Entity):
:param name:
:return:
"""
config = tools.config_tool.getConfig(job, P.KEY_BASIC, subject)
config = tools.config_tool.getConfig(job, P.KEY_BASIC, subject, ttype=B.SUBJECT_PROJECT)
if config is not None:
if len(name) == 0:
return config

model/testcase.csv (2)

@@ -2,7 +2,7 @@ _type;ctlg;;;;;;;;;
_key;_field;;;;;;;;;
table:testcase;_field;type;format;index;generic;aggregat;key;acceptance;alias;description
;tcid;pk;integer;N;;;T:1;;;
;name;str;vchar(256);I;;;F:1;;;
;name;str;vchar(256);I;;;F:1;;;# name of the component-table
;description;string;vchar(256);N;;;;;;
;project;string;vchar(256);I;;;;;;
;application;string;vchar(256);N;;;;;;

model/testcase.py (81)

@@ -18,6 +18,7 @@ import model.entity
import model.story
import model.datatable
import model.step
import model.factory
TABLE_NAMES = ["application", "ap_project", "ap_component"]
STORAGES = [model.entity.STORAGE_FILE, model.entity.STORAGE_DB]
@@ -32,7 +33,7 @@ FIELD_DESCRIPTION = B.SUBJECT_DESCRIPTION
FIELD_REFERENCE = B.SUBJECT_REFERENCE
FIELD_PROJECT = B.SUBJECT_PROJECT
FIELD_APPLICATION = B.SUBJECT_APP
LIST_FIELDS = [FIELD_ID, FIELD_NAME, FIELD_APPLICATION,
LIST_FIELDS = [FIELD_ID, FIELD_NAME,
FIELD_DESCRIPTION, FIELD_REFERENCE, FIELD_PROJECT]
""" list of object-attributes """
LIST_NODES = [B.NODE_ATTRIBUTES]
@@ -41,11 +42,12 @@ SUB_USECASE = B.SUBJECT_USECASES
SUB_STORIES = B.SUBJECT_STORIES
SUB_STEPS = "steps"
SUB_TABLES = "tables"
LIST_SUBTABLES = {
LIST_SUBTABLES = { # with additional attributes for the subtable
B.SUBJECT_APPS: [],
SUB_TABLES: [D.DATA_ATTR_DATE],
SUB_STEPS: [],
SUB_USECASE: [B.SUBJECT_DESCRIPTION, B.SUBJECT_REFERENCE],
SUB_STORIES: [B.SUBJECT_DESCRIPTION, B.SUBJECT_REFERENCE],
SUB_STORIES: [B.SUBJECT_DESCRIPTION, B.SUBJECT_REFERENCE]
}
LIST_SUB_DESCRIPT = [D.DATA_ATTR_USECASE_DESCR, D.DATA_ATTR_STORY_DESCR]
@@ -67,6 +69,10 @@ class Testcase(model.entity.Entity):
steps -> comp.step
tables -> comp.table
"""
FIELD_ID = "tcid"
LIST_FIELDS = [FIELD_ID, D.FIELD_NAME, B.SUBJECT_APP,
B.SUBJECT_DESCRIPTION, B.SUBJECT_REFERENCE, B.SUBJECT_PROJECT]
LIST_NODES = [B.NODE_ATTRIBUTES]
tcid = ""
name = ""
description = ""
@@ -119,6 +125,75 @@ class Testcase(model.entity.Entity):
self.setAttributes(job, config, name, LIST_FIELDS, LIST_NODES, LIST_SUBTABLES)
return self
@staticmethod
def set_subtables(job, tdata: dict) -> dict:
"""
reads the subtable tag from the csv file and converts the subtables into workable entity elements
:param job:
:param tdata:
:return:
"""
outData = {}
subtables = {}
arguments = {}
for k in tdata:
if k in [B.DATA_NODE_TABLES]:
outData[k[1:]] = tdata[k]
elif k in [B.DATA_NODE_OPTION, "_head"]:
outData[k[1:]] = {}
for e in tdata[k]:
ka = str(e).split("-")
if ka[0] in B.LIST_SUBJECTS:
# not necessary, because these are singular names with a fieldname
ka[0] = ka[0][:-1]
if len(ka) == 1:
# add reference name to potential subtable
ka.append("name")
if ka[0]+"s" in B.LIST_SUBJECTS:
# standard table form: step: { _header: [], _data: [{}, {}, ..] }
if ka[0] in subtables:
subject = subtables[ka[0]]
sublist = subject[B.DATA_NODE_DATA]
header = subject[B.DATA_NODE_HEADER]
else:
enty = model.factory.get_entity_object(job, ka[0], {})
header = enty.getFieldList() + enty.getNodeList()
sublist = []
subject = {}
subject[B.DATA_NODE_DATA] = sublist
subject[B.DATA_NODE_HEADER] = header
#subtables[ka[0]]
string = str(tdata[k][e])
values = str(tdata[k][e]).split(D.INTERNAL_DELIMITER)
i = 0
for v in values:
if len(v) < 1:
continue
if len(sublist) > i:
subtable = sublist[i]
else:
subtable = {}
sublist.append(subtable)
subtable[ka[1]] = v
sublist[i] = subtable
i += 1
subject[B.DATA_NODE_DATA] = sublist
subtables[ka[0]] = subject
else: # attributes
arguments[e] = tdata[k][e]
elif k in B.LIST_SUBJECTS:
outData[k] = tdata[k]
elif k in B.LIST_DATA_NODE:
outData[k[1:]] = tdata[k]
elif k[1:] in Testcase.LIST_FIELDS:
outData[k[1:]] = tdata[k]
else:
raise Exception("unknown tag in csv-data: "+k)
for s in subtables:
outData[s] = subtables[s]
outData[B.DATA_NODE_ARGS] = arguments
return outData
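
A worked example of this rebuild, assuming D.INTERNAL_DELIMITER is the "||" separator used in test_27testcase.py below and the usual underscore-prefixed node names (values are illustrative):

tdata = {B.DATA_NODE_OPTION: {"story": "US-1234||||||",
                              "story-description": "a story||||||"}}
out = Testcase.set_subtables(job, tdata)
# out["story"][B.DATA_NODE_HEADER] comes from the Story entity's field- and node-lists
# out["story"][B.DATA_NODE_DATA] == [{"name": "US-1234", "description": "a story"}]
# out[B.DATA_NODE_ARGS] == {}  (no plain attributes in the option block)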
def getFieldList(self):
"""
returns a list of scalar attributes

model/user.csv (3)

@@ -1,4 +1,4 @@
_type;ctlg;;;;;;;;;
_type;ddl;;;;;;;;;
_key;_field;;;;;;;;;
table:user;_field;type;format;index;generic;aggregat;key;acceptance;alias;description
;id;pk;autoint;N;;;T:1;;;
@@ -9,3 +9,4 @@ table:user;_field;type;format;index;generic;aggregat;key;acceptance;alias;descri
;password;string;vchar(256);N;;;;;;
;project;string;vchar(256);N;;;;;;
;role;string;vchar(256);N;;;;;;
;attributes;string;vchar(4098);N;;;;;;

model/user.py (31)

@@ -35,6 +35,12 @@ IDENTIFYER_FIELDS = [FIELD_ID]
""" unique technical field as technical identifer """
class User(model.entity.Entity):
LIST_FIELDS = [FIELD_ID, FIELD_ROLE, FIELD_PROJECT, FIELD_PASSWORD, FIELD_EMAIL, FIELD_FAMNAME, FIELD_NAME,
FIELD_USERNAME]
""" list of object-attributes """
LIST_NODES = [B.NODE_ATTRIBUTES]
LIST_SUBTABLES = {}
UNIQUE_FIELDS = [FIELD_USERNAME]
id = 0
username = ""
name = ""
@@ -43,11 +49,7 @@ class User(model.entity.Entity):
password = ""
project = ""
role = ""
def xx__init__(self, job, name=""):
self.job = job
if len(name) > 1:
self.getEntity(job, name)
attributes = ""
def read_unique_names(self, job, project, application, gran, args):
"""
@@ -112,6 +114,21 @@ class User(model.entity.Entity):
setattr(self, k, config[k])
return self
def check_data(self, job, data: dict) -> dict:
"""
it checks the data for the specific form
:param job:
:param data:
:return:
"""
import tools.file_type
checkNodes = {}
checkNodes[tools.file_type.MUST_NODES] = [B.DATA_NODE_HEADER, B.DATA_NODE_FIELDS, B.DATA_NODE_KEYS]
checkNodes[tools.file_type.MUSTNT_NODES] = [B.DATA_NODE_DATA]
checkNodes[tools.file_type.OPT_NODES] = []
return tools.file_type.check_nodes(job, data, checkNodes)
def getFieldList(self):
"""
returns a list of scalar attributes
@@ -278,11 +295,11 @@ class User(model.entity.Entity):
:param name:
:return:
"""
config = tools.config_tool.getConfig(job, P.KEY_USER, name)
config = tools.config_tool.getConfig(job, P.KEY_USER, name, ttype=B.SUBJECT_USER)
if config is not None:
return config
if name == model.user.User.getCurrentUser(job):
config = tools.config_tool.getConfig(job, P.KEY_USER, "default")
config = tools.config_tool.getConfig(job, P.KEY_USER, "default", ttype=B.SUBJECT_USER)
if "user" in config:
config = config["user"]
if config is not None:

test/test_10job.py (4)

@@ -118,14 +118,14 @@ class MyTestCase(unittest.TestCase):
'application': 'TESTAPP', 'environment': 'ENV01', 'testcase': 'TC0002'}
res = tools.job_tool.select_variant(job, "", "Testfall", args)
print(str(res))
self.assertEqual(len(res), 3)
self.assertEqual(3, len(res))
self.assertEqual("01_init_testcase", res[0])
args = {'proc': 'Testfall ausfuehren', 'gran': 'Testfall', 'user': 'ulrich',
'project': 'TESTPROJ', 'step': '1', 'program': 'test_executer',
'application': 'TESTAPP', 'environment': 'ENV01', 'testcase': 'TC0001'}
res = tools.job_tool.select_variant(job, "", "Testfall", args)
print(str(res))
self.assertEqual(len(res), 3)
self.assertEqual(3, len(res))
self.assertEqual("01_init_testcase", res[0])
def test_14selectTestplan(self):

test/test_15user.py (10)

@@ -16,7 +16,7 @@ HOME_PATH = test.constants.HOME_PATH
PYTHON_CMD = "python"
TEST_FUNCTIONS = ["test_10getEntityNames", "test_11getEntities", "test_12getEntity",
"test_13writeEntity", "test_14insertEntity"]
# TEST_FUNCTIONS = ["test_14insertEntity"]
#TEST_FUNCTIONS = ["test_10getEntityNames"]
PROGRAM_NAME = "clean_workspace"
class MyTestCase(unittest.TestCase):
@@ -34,9 +34,9 @@ class MyTestCase(unittest.TestCase):
user = model.user.User(job)
entityNames = []
entityNames = user.read_unique_names(job, "", "", "", {})
self.assertEquals(type(entityNames), list)
self.assertEqual(type(entityNames), list)
entityNames = user.select_unique_names(job, "", "", "", {})
self.assertEquals(type(entityNames), list)
self.assertEqual(type(entityNames), list)
def test_11getEntities(self):
global mymsg
@@ -91,7 +91,7 @@ class MyTestCase(unittest.TestCase):
entityNames = user.get_unique_names(job, storage=model.entity.STORAGE_FILE)
self.assertIn(username, entityNames)
actUser = user.read_entity(job, username)
self.assertEquals(getattr(actUser, model.user.FIELD_USERNAME), username)
self.assertEqual(getattr(actUser, model.user.FIELD_USERNAME), username)
actUser.remove_entity(job, username)
entityNames = user.get_unique_names(job, storage=model.entity.STORAGE_FILE)
self.assertNotIn(username, entityNames)
@@ -116,7 +116,7 @@ class MyTestCase(unittest.TestCase):
entityNames = collectInnerList(user.get_unique_names(job, storage=model.entity.STORAGE_DB))
self.assertIn(username, entityNames)
actUser = user.select_entity(job, username)
self.assertEquals(getattr(actUser, model.user.FIELD_USERNAME), username)
self.assertEqual(getattr(actUser, model.user.FIELD_USERNAME), username)
actUser.delete_entity(job, username, "user")
entityNames = collectInnerList(user.get_unique_names(job, storage=model.entity.STORAGE_DB))
self.assertNotIn(username, entityNames)

test/test_27testcase.py (36)

@@ -16,7 +16,7 @@ HOME_PATH = test.constants.HOME_PATH
PYTHON_CMD = "python"
TEST_FUNCTIONS = ["test_10getEntityNames", "test_11getEntities", "test_12getEntity",
"test_13writeEntity", "test_14insertEntity"]
TEST_FUNCTIONS = ["test_10getEntityNames", "test_12getEntity"]
TEST_FUNCTIONS = ["test_20setSubtable"]
PROGRAM_NAME = "clean_workspace"
class MyTestCase(unittest.TestCase):
@@ -66,5 +66,39 @@ class MyTestCase(unittest.TestCase):
self.assertEqual(getattr(acttestcase, model.testcase.FIELD_NAME), name)
self.assertRaises(Exception, testcase.read_entity, job, "xyzxyz")
def test_20setSubtable(self):
global mymsg
global jobObject
actfunction = str(inspect.currentframe().f_code.co_name)
cnttest = 0
if actfunction not in TEST_FUNCTIONS:
return
job = test.testtools.getJob()
testcase = model.testcase.Testcase(job, "TESTPROJ")
#testcase.set_subtables(job, {})
tdata = {
"_name": "TC0001",
"_head": {
B.SUBJECT_APP: "TESTPROJ||||||"
},
B.DATA_NODE_OPTION: {
"_input": "testa",
"story-description": "ösdigjiuj||||||",
"story": "US-1234||||||",
"story-id": "US-1234||||||"
}
}
result = testcase.set_subtables(job, tdata)
print(str(result))
self.assertIn(B.SUBJECT_APP, result)
self.assertIn(B.SUBJECT_STORY, result)
self.assertNotIn(B.SUBJECT_USECASE, result)
tdata[B.DATA_NODE_OPTION][B.SUBJECT_USECASE] = "UC-321||UC-123||||"
tdata[B.DATA_NODE_OPTION]["usecase-description"] = "ldkfj||kdjf||||"
result = testcase.set_subtables(job, tdata)
self.assertIn(B.SUBJECT_USECASE, result)
print(str(result))
self.assertEqual(2, len(result[B.SUBJECT_USECASE][B.DATA_NODE_DATA]))
if __name__ == '__main__':
unittest.main()

test/test_31filecsv.py (3)

@@ -33,7 +33,7 @@ TEST_FUNCTIONS = [ "test_02isBlock", "test_06parseCsv",
"test_11ddl", "test_12catalog", "test_13getCsvSpec_tree", "test_14getCsvSpec_key",
"test_15getCsvSpec_conf", "test_16getCsvSpec_data"
]
TEST_FUNCTIONS = [ "test_02isBlock"
TEST_FUNCTIONS = [ "test_20setSubtable"
]
PROGRAM_NAME = "clean_workspace"
@@ -181,6 +181,7 @@ class MyTestCase(unittest.TestCase):
if actfunction not in TEST_FUNCTIONS:
return
#job = basic.program.SimpleJob(PROGRAM_NAME)
# def isBlock(self, msg, job, field: str, block: str, status: str) -> bool:
job = test.testtools.getWorkspaceJob(PROGRAM_NAME)
f = toolHandling.getFileTool(job, None, "csv")
res = f.isBlock(job.m, job, "_variants", D.CSV_BLOCK_SUBTABLES, "status")

tools/config_tool.py (20)

@@ -160,18 +160,18 @@ def getCompPath(job, name, subname, filename):
if configpath is not None:
return configpath
if name == B.ATTR_INST_TESTSERVER:
print(name)
#print(name)
for format in CONFIG_FORMAT:
pathname = os.path.join(job.conf[B.TOPIC_PATH][P.ATTR_PATH_PROGRAM],
P.ATTR_PATH_MODEL, subname + "." + format)
print(pathname)
#print(pathname)
if os.path.exists(pathname):
return pathname
for format in CONFIG_FORMAT:
pathname = os.path.join(job.conf[B.TOPIC_PATH][P.ATTR_PATH_COMPONENTS],
basic.componentHandling.getComponentFolder(name), filename + "." + format)
print(pathname)
#print(pathname)
if os.path.exists(pathname):
return pathname
for format in CONFIG_FORMAT:
@@ -323,20 +323,8 @@ def getConfig(job, modul: str, name: str, subname: str = "", ttype: str = D.CSV_
return confs
if ttype == "" and modul in ["tool", "comp"]:
ttype = modul
if modul == D.DDL_FILENAME:
doc = tools.file_tool.read_file_dict(job, pathname, msg, ttype)
# in csv the root is the subname
# from the Dict-structure of DDL_FILENAME pick the substructure of the subname
keys = list(doc.keys())
if subname not in keys and len(keys) == 1:
doc0 = doc[keys[0]]
doc = doc0
keys = list(doc.keys())
if subname in keys:
doc0 = doc[subname]
doc = doc0
else:
doc = tools.file_tool.read_file_dict(job, pathname, msg, ttype)
# TODO ?! unnecessary moving
for i, v in doc.items():
confs[i] = v
return confs
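
With the DDL special case removed, callers state the spec type themselves; the two call shapes introduced by this commit:

# db_abstract.get_ddl now requests the ddl spec type explicitly:
conf = tools.config_tool.getConfig(job, D.DDL_FILENAME, compName, table, D.CSV_SPECTYPE_DDL)
# entity and user lookups pass ttype as a keyword argument:
config = tools.config_tool.getConfig(job, P.KEY_USER, name, ttype=B.SUBJECT_USER)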

tools/db_abstract.py (33)

@@ -48,8 +48,9 @@ import basic.constants as B
import tools.data_const as D
import tools.date_tool
import os
import model.entity
def get_ddl(job, compName, table):
def get_ddl(job, compName: str, table: str) -> dict:
"""
this function read the ddl from the right config
:param job:
@@ -57,9 +58,10 @@ def get_ddl(job, compName, table):
:param table:
:return: ddl in correct format
"""
out = {}
conf = tools.config_tool.getConfig(job, D.DDL_FILENAME, compName, table)
if B.DATA_NODE_TABLES in conf and table in conf[B.DATA_NODE_TABLES]:
conf = tools.config_tool.getConfig(job, D.DDL_FILENAME, compName, table, D.CSV_SPECTYPE_DDL)
return conf
""" if B.DATA_NODE_TABLES in conf and table in conf[B.DATA_NODE_TABLES]:
ddl = conf[B.DATA_NODE_TABLES][table]
elif table in conf:
ddl = conf[table]
@@ -81,7 +83,7 @@ def get_ddl(job, compName, table):
else:
out = ddl
return out
"""
def getDbAttributes(job, comp, table):
"""
this function collects all relevant db-attributes from any location where it can be set.
@@ -89,6 +91,7 @@ def getDbAttributes(job, comp, table):
* comp.artifact.db.[table].attr
* comp.artifact.[db].[table].attr
"""
verify = False
out = {
B.ATTR_DB_DATABASE: "",
B.ATTR_DB_SCHEMA: "",
@@ -110,24 +113,24 @@ def getDbAttributes(job, comp, table):
if (B.SUBJECT_ARTIFACTS in comp.conf and table in comp.conf[B.SUBJECT_ARTIFACTS][B.TOPIC_NODE_DB]) \
and (attr in comp.conf[B.SUBJECT_ARTIFACTS][B.TOPIC_NODE_DB][table]):
out[attr] = comp.conf[B.SUBJECT_ARTIFACTS][B.TOPIC_NODE_DB][table][attr]
print("a " + attr + " " + out[attr])
if verify: print("a " + attr + " " + out[attr])
elif (B.SUBJECT_ARTIFACTS in comp.conf and attr in comp.conf[B.SUBJECT_ARTIFACTS][B.TOPIC_NODE_DB]):
out[attr] = comp.conf[B.SUBJECT_ARTIFACTS][B.TOPIC_NODE_DB][attr]
print("b " + attr + " " + out[attr])
if verify: print("b " + attr + " " + out[attr])
elif (B.TOPIC_NODE_DB in comp.conf[B.TOPIC_CONN]) \
and (table in comp.conf[B.TOPIC_CONN][B.TOPIC_NODE_DB]) \
and (attr in comp.conf[B.TOPIC_CONN][table][B.TOPIC_NODE_DB]):
out[attr] = comp.conf[B.TOPIC_CONN][table][B.TOPIC_NODE_DB][attr]
print("c " + attr + " " + out[attr])
if verify: print("c " + attr + " " + out[attr])
elif (B.TOPIC_NODE_DB in comp.conf[B.TOPIC_CONN]) \
and (attr in comp.conf[B.TOPIC_CONN][B.TOPIC_NODE_DB]):
out[attr] = comp.conf[B.TOPIC_CONN][B.TOPIC_NODE_DB][attr]
print("d " + attr + " " + out[attr])
if verify: print("d " + attr + " " + out[attr])
elif (attr in comp.conf[B.TOPIC_CONN]):
out[attr] = comp.conf[B.TOPIC_CONN][attr]
print("e " + attr + " " + out[attr])
if verify: print("e " + attr + " " + out[attr])
else:
print("f " + attr + " " + out[attr])
if verify: print("f " + attr + " " + out[attr])
return out
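
Summarised, the branches a..f above fall back in this order (a comment sketch using plain node names for readability, not the B.* constants):

# a) comp.conf[artifacts][db][table][attr]   per-table artifact setting
# b) comp.conf[artifacts][db][attr]          artifact-wide setting
# c) comp.conf[conn][table][db][attr]        per-table connection setting
# d) comp.conf[conn][db][attr]               db-wide connection setting
# e) comp.conf[conn][attr]                   connection-wide setting
# f) keep the empty default in out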
@@ -254,8 +257,8 @@ def formatDbField(comp, val, field):
if field[D.DDL_CONSTRAINT] != B.SVAL_YES:
comp.m.logError("must-field is null " + field[D.DDL_FIELD])
return None
print("formatDbField "+str(comp))
print("formatDbField "+str(field)+" , "+str(val))
#print("formatDbField "+str(comp))
#print("formatDbField "+str(field)+" , "+str(val))
return formatDbVal(comp.m, val, field[D.DDL_TYPE])
@@ -330,6 +333,10 @@ class DbFcts():
return getDbAttributes(job, self.comp, table)
def getHeader(self, job):
fields = self.comp.getFieldList()
return self.comp.getFieldList() + self.comp.getNodeList()
def selectTables(self, subdir, job):
""" method to delete rows from a database
statement written in sql """

tools/dbmysql_tool.py (13)

@@ -28,10 +28,12 @@ class DbFcts(tools.dbrel_tool.DbFcts):
tdata = {}
verify = -1+job.getDebugLevel("db_tool")
attr = self.getDbAttributes(job, B.SVAL_NULL)
sql = "SELECT * FROM "+attr[B.ATTR_DB_DATABASE]+"."+table
header = self.getHeader(job)
sql = "SELECT " + ",".join(header) + " FROM "+attr[B.ATTR_DB_DATABASE]+"."+table
if len(where) > 3:
sql += " "+where
sql += ";"
print("sql " + sql)
self.comp.m.logInfo(sql)
connector = self.getConnector()
try:
@@ -41,19 +43,12 @@ class DbFcts(tools.dbrel_tool.DbFcts):
print("except "+str(e))
print("except " + traceback.format_exc())
myresult = mycursor.fetchall()
tdata[B.DATA_NODE_HEADER] = []
for f in self.comp.conf[B.DATA_NODE_DDL][table][B.DATA_NODE_HEADER]:
tdata[B.DATA_NODE_HEADER].append(f)
tdata[B.DATA_NODE_HEADER] = header
tdata[B.DATA_NODE_DATA] = []
for x in myresult:
r = {}
i = 0
if B.DATA_NODE_KEYS in self.comp.conf[B.DATA_NODE_DDL][table]:
header = self.comp.conf[B.DATA_NODE_DDL][table][B.DATA_NODE_KEYS].keys()
keys = self.comp.conf[B.DATA_NODE_DDL][table][B.DATA_NODE_KEYS]
else:
header = self.comp.conf[B.DATA_NODE_DDL][table][B.DATA_NODE_HEADER]
keys = self.comp.conf[B.DATA_NODE_DDL][table]
for f in header:
if (keys[f][D.DDL_TYPE] in [D.TYPE_TIME, D.TYPE_DATE]):
r[f] = tools.date_tool.getFormatdate(x[i], tools.date_tool.F_DIR)

tools/file_tool.py (12)

@@ -20,7 +20,7 @@ import tools.data_const as D
#import tools.tdata_tool
import tools.date_tool
import basic.constants as B
import tools.file_type
def getDump(obj):
result = vars(obj)
return str(result)
@@ -241,8 +241,10 @@ def read_file_dict(job, path: str, msg, ttype: str = D.DFILE_TYPE_CSV) -> dict:
doc = ffcts.load_file(path, ttype)
# tools.tdata_tool.getCsvSpec(msg, job, path, D.CSV_SPECTYPE_CONF)
doc["_path"] = path
check_file_dict(job, doc, msg, ttype)
return doc
# TODO !! refactor to file_type
return tools.file_type.check_tdata(job, doc, ttype)
#check_file_dict(job, doc, msg, ttype)
#return doc
def check_file_dict(job, config: dict, msg, ttype: str):
@@ -274,9 +276,11 @@ def check_file_dict(job, config: dict, msg, ttype: str):
elif ttype in [D.CSV_SPECTYPE_COMP]:
MUST_NODES = [B.SUBJECT_ARTIFACTS, B.SUBJECT_STEPS, "functions", B.SUBJECT_DATATABLES]
MUSTNT_NODES = [B.DATA_NODE_DATA]
elif ttype+"s" in B.LIST_SUBJECTS:
print("subject-typ "+ttype+" "+config["_path"])
elif ttype in ["basic", "tool"]:
# tool : tool-specific nodes
print("anderer bekannter Ttyp " + ttype + " " + config["_path"])
#print("anderer bekannter Ttyp " + ttype + " " + config["_path"])
return
else:
print("anderer Ttyp "+ttype+" "+config["_path"])

tools/file_type.py (316)

@@ -0,0 +1,316 @@
import os
import basic.constants as B
import tools.data_const as D
MUST_NODES = "must"
MUSTNT_NODES = "mustnt"
OPT_NODES = "optional"
def rebuild_tdata(job, tdata: dict, tableAttr: dict, ttype:str) -> dict:
"""
it rebuilds the data into the specific form
:param job:
:param tdata:
:param tableAttr:
:param ttype:
:return:
"""
if ttype == D.CSV_SPECTYPE_DDL:
return DatatypeDDL.rebuild_data(job, tdata, tableAttr)
elif ttype == D.CSV_SPECTYPE_DATA:
return rebuildSpec(job, tdata)
elif ttype == D.CSV_SPECTYPE_CONF:
return rebuildConfig(job, tdata)
elif ttype == D.CSV_SPECTYPE_CTLG:
return DatatypeCatalog.rebuild_data(job, tdata, tableAttr)
# return rebuildCatalog(job, tdata)
elif ttype == D.CSV_SPECTYPE_TREE:
return rebuildCatalog(job, tdata)
elif ttype in B.LIST_SUBJECTS:
return tdata
else:
raise Exception("ttype is not defined " + ttype)
def insert_fields(job, tdata, fields: list, ttype: str) -> dict:
if ttype == D.CSV_SPECTYPE_DDL:
return tdata
elif ttype == D.CSV_SPECTYPE_DATA:
return tdata
elif ttype == D.CSV_SPECTYPE_CONF:
return tdata
elif ttype == D.CSV_SPECTYPE_CTLG:
return tdata
elif ttype == D.CSV_SPECTYPE_TREE:
return tdata
elif ttype in B.LIST_SUBJECTS:
return tdata
else:
job.m.logError("ttype is not defined " + ttype)
return tdata
def buildRow(job, tdata, fields: list) -> dict:
"""
build simple rows from fields
:param job:
:param tdata:
:param fields:
:return:
"""
row = {}
# TODO ? which case ?
#if ttype == D.CSV_SPECTYPE_DATA and ":" not in fields[0] and D.DATA_ATTR_ALIAS in tableDict:
# fields = [tableDict[D.DATA_ATTR_ALIAS]] + fields
# simple fields of a table:
# table:name;col1;col2;...
# ;val1;val2;...
i = 1
for f in tdata[B.DATA_NODE_HEADER]:
# --> still not used
# TODO arguments - as json or simplified "arg1:val1,arg2:val2,.."
if f in [B.DATA_NODE_ARGS, "args"]:
arguments = {}
row[B.DATA_NODE_ARGS] = arguments
if B.DATA_NODE_ARGS in row:
a = fields[i].split(":")
row[B.DATA_NODE_ARGS][a[0]] = a[1]
# <-- still not used
else:
row[f] = fields[i]
i += 1
for arg in fields[len(tdata[B.DATA_NODE_HEADER])+1:]:
# args := arg1:val1
if len(arg) == 0 or arg.strip()[0:1] == "#":
continue
print("arg "+arg)
a = arg.split(":")
row[B.DATA_NODE_ARGS][a[0]] = a[1]
return row
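
A small sketch of the plain-field path, assuming fields[0] still carries the block keyword so value columns start at index 1:

tdata = {B.DATA_NODE_HEADER: ["name", "project"]}
fields = ["table:testcase", "TC0001", "TESTPROJ"]
row = buildRow(job, tdata, fields)
# row == {"name": "TC0001", "project": "TESTPROJ"}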
def check_tdata(job, tdata: dict, ttype:str) -> dict:
"""
it checks the data for the specific form
:param job:
:param tdata:
:param ttype:
:return:
"""
if ttype == D.CSV_SPECTYPE_DDL:
return DatatypeDDL.check_data(job, tdata)
elif ttype == D.CSV_SPECTYPE_DATA:
return checkSpec(job, tdata)
elif ttype == D.CSV_SPECTYPE_CONF:
return checkConfig(job, tdata)
elif ttype == D.CSV_SPECTYPE_COMP:
return checkComp(job, tdata)
elif ttype == D.CSV_SPECTYPE_CTLG:
return DatatypeCatalog.check_data(job, tdata)
elif ttype == D.CSV_SPECTYPE_TREE:
return checkCatalog(job, tdata)
elif ttype in B.LIST_SUBJECTS:
return tdata
elif ttype in ["basic"]:
return tdata
else:
job.m.logError("ttype is not defined " + ttype)
return tdata
def rebuildConfig(job, tdata: dict) -> dict:
return tdata
def insertConfig(job, tdata, fields: list) -> dict:
# TODO test it - configuration exists as a tree (yml, json, ...)
return tdata
def checkConfig(job, tdata: dict) -> dict:
checkNodes = {}
checkNodes[MUST_NODES] = [B.DATA_NODE_HEADER, B.DATA_NODE_DATA]
checkNodes[MUSTNT_NODES] = [B.DATA_NODE_FIELDS, B.DATA_NODE_KEYS]
checkNodes[OPT_NODES] = []
return check_nodes(job, tdata, checkNodes)
def rebuildComp(job, tdata: dict) -> dict:
return tdata
def insertComp(job, tdata, fields: list) -> dict:
# TODO test it - comp-configuration exists as a tree (yml, json, ...)
row = buildRow(job, tdata, fields)
tdata[fields[1]] = row
return tdata
def checkComp(job, tdata: dict) -> dict:
checkNodes = {}
checkNodes[MUST_NODES] = [B.SUBJECT_ARTIFACTS, B.SUBJECT_STEPS, "functions", B.SUBJECT_DATATABLES]
checkNodes[MUSTNT_NODES] = [B.DATA_NODE_DATA]
checkNodes[OPT_NODES] = []
return check_nodes(job, tdata, checkNodes)
class DatatypeCatalog():
"""
structure:
* B.DATA_NODE_HEADER : list of ddl-attributes
* B.DATA_NODE_FIELDS : list of field-names
* B.DATA_NODE_KEYS : fields with attributes (header X fields)
"""
@staticmethod
def rebuild_data(job, data: dict, tableAttr: dict) -> dict:
data = popTablesNode(job, data)
data = popNameNode(job, data)
data = addTableAttr(job, data, tableAttr)
return data
@staticmethod
def check_data(job, data: dict) -> dict:
checkNodes = {}
checkNodes[MUST_NODES] = [B.DATA_NODE_HEADER, B.DATA_NODE_KEYS]
checkNodes[MUSTNT_NODES] = [B.DATA_NODE_DATA]
checkNodes[OPT_NODES] = [B.DATA_NODE_FIELDS]
return check_nodes(job, data, checkNodes)
def popTablesNode(job, data: dict) -> dict:
if B.DATA_NODE_TABLES not in data:
return data
outdata = {}
# if double-DATA_NODE_TABLES
if B.DATA_NODE_TABLES in data and B.DATA_NODE_TABLES in data[B.DATA_NODE_TABLES]:
for k in data[B.DATA_NODE_TABLES][B.DATA_NODE_TABLES]:
if k in data[B.DATA_NODE_TABLES]:
print("Error")
else:
data[B.DATA_NODE_TABLES][k] = data[B.DATA_NODE_TABLES][B.DATA_NODE_TABLES][k]
data[B.DATA_NODE_TABLES].pop(B.DATA_NODE_TABLES)
if len(data[B.DATA_NODE_TABLES]) > 1:
job.m.setError("Mehr als eine Tabelle")
return data
elif len(data[B.DATA_NODE_TABLES]) == 0:
job.m.setError("Keine Tabelle")
return outdata
else:
for k in data[B.DATA_NODE_TABLES]:
outdata[k] = data[B.DATA_NODE_TABLES][k]
return outdata
def popNameNode(job, data: dict) -> dict:
outdata = {}
for k in data:
if "_"+D.FIELD_NAME in data[k] and k == data[k]["_"+D.FIELD_NAME]:
for l in data[k]:
outdata[l] = data[k][l]
else:
outdata[k] = data[k]
return outdata
def addTableAttr(job, data: dict, tableAttr: dict) -> dict:
for k in tableAttr:
if k == "_hit":
continue
data[k] = tableAttr[k]
return data
def rebuildCatalog(job, tdata: dict) -> dict:
return tdata
def insertCatalog(job, tdata, fields: list) -> dict:
row = buildRow(job, tdata, fields)
tdata[D.CSV_NODETYPE_KEYS][fields[tdata[D.DATA_ATTR_KEY]].strip()] = row
return tdata
def checkCatalog(job, tdata: dict) -> dict:
checkNodes = {}
checkNodes[MUST_NODES] = [B.DATA_NODE_HEADER, B.DATA_NODE_FIELDS, B.DATA_NODE_KEYS]
checkNodes[MUSTNT_NODES] = [B.DATA_NODE_DATA]
checkNodes[OPT_NODES] = []
return check_nodes(job, tdata, checkNodes)
class DatatypeDDL():
"""
structure:
* B.DATA_NODE_HEADER : list of ddl-attributes
* B.DATA_NODE_FIELDS : list of field-names
* B.DATA_NODE_KEYS : fields with attributes (header X fields)
"""
@staticmethod
def rebuild_data(job, data: dict, tableAttr: dict) -> dict:
data = popTablesNode(job, data)
data = popNameNode(job, data)
data = buildKeys(job, data)
data = addTableAttr(job, data, tableAttr)
return data
@staticmethod
def check_data(job, data: dict) -> dict:
checkNodes = {}
checkNodes[MUST_NODES] = [B.DATA_NODE_HEADER, B.DATA_NODE_FIELDS, B.DATA_NODE_KEYS]
checkNodes[MUSTNT_NODES] = [B.DATA_NODE_DATA]
checkNodes[OPT_NODES] = []
return check_nodes(job, data, checkNodes)
def buildKeys(job, data: dict) -> dict:
fields = []
keys = {}
if B.DATA_NODE_DATA in data and len(data[B.DATA_NODE_DATA]) > 1:
pass
else:
data.pop(B.DATA_NODE_DATA)
for k in data:
if k[:1] == "_":
continue
fields.append(k)
keys[k] = data[k]
for k in fields:
data.pop(k)
data[B.DATA_NODE_FIELDS] = fields
data[B.DATA_NODE_KEYS] = keys
return data
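
A worked example of the key extraction, assuming the usual underscore-prefixed node names and an empty data node (which buildKeys pops):

data = {"_header": ["field", "type"], "_data": [],
        "tcid": {"type": "pk"}, "name": {"type": "str"}}
data = buildKeys(job, data)
# data["_fields"] == ["tcid", "name"]
# data["_keys"]   == {"tcid": {"type": "pk"}, "name": {"type": "str"}}
# data["_header"] stays untouched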
def insertDDL(job, tdata, fields: list) -> dict:
return tdata
def checkDDL(job, tdata: dict) -> dict:
checkNodes = {}
checkNodes[MUST_NODES] = [B.DATA_NODE_HEADER]
checkNodes[MUSTNT_NODES] = [B.DATA_NODE_DATA, B.DATA_NODE_FIELDS, B.DATA_NODE_KEYS]
checkNodes[OPT_NODES] = []
return check_nodes(job, tdata, checkNodes)
def rebuildSpec(job, tdata: dict) -> dict:
return tdata
def insertSpec(job, tdata, fields: list) -> dict:
tdata[B.DATA_NODE_DATA].append(fields)
return tdata
def checkSpec(job, tdata: dict) -> dict:
checkNodes = {}
checkNodes[MUST_NODES] = [B.DATA_NODE_HEADER, B.DATA_NODE_DATA]
checkNodes[MUSTNT_NODES] = [B.DATA_NODE_FIELDS, B.DATA_NODE_KEYS]
checkNodes[OPT_NODES] = []
return check_nodes(job, tdata, checkNodes)
def check_nodes(job, config: dict, checkNodes: dict):
mustNodes = checkNodes[MUST_NODES]
mustntNodes = checkNodes[MUSTNT_NODES]
optionalNodes = checkNodes[OPT_NODES]
a = str(config["_path"]).split(os.path.sep)
b = a[-1].split(".")
path = config["_path"]
if b[0] in config:
config = config[b[0]]
if len(config) == 2:
for x in B.LIST_SUBJECTS:
if x[:-1] in config:
config = config[x[:-1]]
break
for n in mustNodes:
if n not in config:
raise Exception("must-node doesnt exist "+n+" "+path)
for n in mustntNodes:
if n not in config:
continue
if len(config[n]) == 0:
job.m.logWarn("empty mustnt-node "+n+" "+path)
else:
raise Exception("must-node doesnt exist "+n+" "+path)
return config
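
A usage sketch mirroring User.check_data above; check_nodes first unwraps the filename- and subject-level nodes, then validates:

checkNodes = {MUST_NODES: [B.DATA_NODE_HEADER, B.DATA_NODE_KEYS],
              MUSTNT_NODES: [B.DATA_NODE_DATA],
              OPT_NODES: [B.DATA_NODE_FIELDS]}
config = check_nodes(job, data, checkNodes)
# raises if a must-node is missing or a mustnt-node is non-empty;
# an empty mustnt-node only logs a warning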

tools/filecsv_fcts.py (37)

@@ -7,6 +7,7 @@
import json
import re
import basic.program
import model.factory
import tools.file_abstract
import basic.constants as B
import tools.data_const as D
@@ -14,6 +15,7 @@ import tools.file_tool
from basic import toolHandling
import traceback
import tools.data_tool
import tools.file_type
DEFAULT_TTYPE = D.CSV_SPECTYPE_DATA
@@ -44,7 +46,7 @@ class FileFcts(tools.file_abstract.FileFcts):
return False
def isBlock(self, msg, job, field, block, status):
def isBlock(self, msg, job, field: str, block: str, status: str) -> bool:
"""
detects the block either on keywords in the field which opens a block
or on status if there is no keyword in the field
@@ -185,25 +187,30 @@ class FileFcts(tools.file_abstract.FileFcts):
elif (status == D.CSV_BLOCK_STEP):
if verify: print("block "+D.CSV_BLOCK_STEP+"2 :: "+l)
if verify: print("step-line "+status+": "+l)
#h = []
steps = setStepAttribute(job, steps, fields[1], fields)
tdata[B.SUBJECT_STEPS] = steps
#h.append(B.DATA_NODE_STEPS)
#tableDict = getTdataContent(msg, tdata, h)
#if verbose: print(">> setTableData " + str(h) + " " + str(tableDict))
#setTableData(tableDict, fields, ttype, job)
#tableDict = getTdataContent(msg, tdata, h)
#if verbose: print(">> setTableData " + str(h) + " " + str(tableDict))
#setTableData(tableDict, fields, ttype, job)
else:
if verify: print("block else :: "+l)
print("unbekannter Block "+status+": "+l)
# end for
# TODO !! refactor to file_type
tfdata = tools.file_type.rebuild_tdata(job, tdata, tableAttr, ttype)
tgdata = self.restParse(job, tableAttr, tdata, ttype)
if ttype in [D.CSV_SPECTYPE_CTLG, D.CSV_SPECTYPE_DDL]:
return tfdata
return tgdata
def restParse(self, job, tableAttr, tdata, ttype):
if D.DATA_ATTR_TYPE not in tableAttr:
tableAttr[D.DATA_ATTR_TYPE] = ttype
if ttype in [D.CSV_SPECTYPE_DDL, D.CSV_SPECTYPE_CTLG, D.CSV_SPECTYPE_MDL]:
if ttype+"s" in B.LIST_SUBJECTS:
print("csvfcts 198 "+ttype)
enty = model.factory.get_entity_object(job, ttype, {})
print(str(tdata))
tdata = enty.set_subtables(job, tdata)
elif ttype in [D.CSV_SPECTYPE_DDL, D.CSV_SPECTYPE_CTLG, D.CSV_SPECTYPE_MDL]:
if len(tdata[B.DATA_NODE_TABLES]) > 1:
job.m.setError("Mehr als einr Tabelle in "+ttype)
job.m.setError("Mehr als eine Tabelle in "+ttype)
elif len(tdata[B.DATA_NODE_TABLES]) == 0:
job.m.setError("Keine Tabelle in "+ttype)
tdata = {}
@@ -227,10 +234,14 @@ class FileFcts(tools.file_abstract.FileFcts):
fields.append(f)
tdata[k][B.DATA_NODE_FIELDS] = fields
header = []
elif ttype in [D.CSV_SPECTYPE_DDL]:
data = {}
for k in tdata:
pass
if B.DATA_NODE_TABLES in tdata and B.DATA_NODE_TABLES in tdata[B.DATA_NODE_TABLES]:
for k in tdata[B.DATA_NODE_TABLES][B.DATA_NODE_TABLES]:
if k in tdata[B.DATA_NODE_TABLES]:
if verbose: print("Error")
print("Error")
else:
tdata[B.DATA_NODE_TABLES][k] = tdata[B.DATA_NODE_TABLES][B.DATA_NODE_TABLES][k]
tdata[B.DATA_NODE_TABLES].pop(B.DATA_NODE_TABLES)
@@ -593,7 +604,7 @@ def setTableData(tableDict: dict, fields: list, ttype: str, job):
print("arg "+arg)
a = arg.split(":")
row[B.DATA_NODE_ARGS][a[0]] = a[1]
if ttype == D.CSV_SPECTYPE_DATA:
if ttype == D.CSV_SPECTYPE_DATA or ttype+"s" in B.LIST_SUBJECTS:
tableDict[B.DATA_NODE_DATA].append(row)
elif ttype in [D.CSV_SPECTYPE_KEYS, D.CSV_SPECTYPE_CTLG]:
tableDict[D.CSV_NODETYPE_KEYS][fields[tableDict[D.DATA_ATTR_KEY]].strip()] = row
