#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------------------------------------
# Author : Ulrich Carmesin
# Source : gitea.ucarmesin.de
# ---------------------------------------------------------------------------------------------------------
"""
This abstract class DbFcts defines the interface for any function relating to any kind of database-management.
It uses the following configuration
.a) COMP.conf->artifact->db->[table] : for structural attributes of the database, maybe specialized for tables \n
.b) COMP.conf->artifact->ddl : for structural attributes of the database-tables \n
.c) COMP.conf->conn->[db] : for connection-attributes and structural attributes, maybe specialized for the database
The attribute db.type resolves which technique is used, implemented in a specific tool-class:
* db2,mysql,pgsql,(s)hive ... for specific synchronous RDBs
* spark,shive,sfile ... using spark-technology
* csv,(s)file ... for data managed in files
The main tasks are: \n
.1. connecting to the database - with attributes
* conn.ip, host, port, user, password, ... for synchronous db-connection
* conn.root, ... for path-definitions for file-implementations (csv, )
.2. delete at the initialisation - with
* db.reset for the context testcase, testsuite or never, on which the content will be deleted - default: testcase
* db.character of the table, if the content will be deleted in a sequence of testcases
* db.tabname - if the tablename differs from the node-name of the table - default: not necessary
* db.filename - if the filename differs from the node-name of the table - default: not necessary
* par.dbwhere - which rows will be deleted - default: empty
.3. insert testdata at the initialisation
* ddl.[fields]
* db.tabname - if the tablename differs from the node-name of the table - default: not necessary
* db.filename - if the filename differs from the node-name of the table - default: not necessary
.4. select the data with
* db.tabname - if the tablename differs from the node-name of the table - default: not necessary
* db.filename - if the filename differs from the node-name of the table - default: not necessary
* ddl._data.[fields].order for an order-clause
* par.dbwhere - which rows will be selected - default: empty
SPECIAL CASES:
* If the table is partitioned, the delete/insert/select functions call the callback-functions
COMP.nextTable() resp. COMP.nextTdata().
"""
import json
import re
import basic.program
import basic.catalog
import tools.config_tool
import basic.constants as B
import tools.data_const as D
import tools.date_tool
import basic.componentHandling
import os
def get_ddl(job, compName, table):
"""
this function reads the ddl of the table from the appropriate config
:param job:
:param compName:
:param table:
:return: ddl in correct format
"""
out = {}
conf = tools.config_tool.getConfig(job, D.DDL_FILENAME, compName, table)
if B.DATA_NODE_TABLES in conf and table in conf[B.DATA_NODE_TABLES]:
ddl = conf[B.DATA_NODE_TABLES][table]
elif table in conf:
ddl = conf[table]
else:
ddl = conf
# workaround because a csv-file is loaded in the wrong format
if B.DATA_NODE_DATA in ddl:
header = []
for e in ddl[B.DATA_NODE_DATA]:
k = e[D.DDL_FIELD]
e.pop(D.DDL_FIELD)
header.append(k)
out[k] = e
for k in D.LIST_DATA_ATTR:
if k not in ddl:
continue
out[k] = ddl[k]
out[B.DATA_NODE_HEADER] = header
else:
out = ddl
return out
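# Illustrative example of the csv-workaround above (field names invented):
# a ddl loaded as {"_data": [{"field": "id", "type": "pk"}, {"field": "name", "type": "string"}]}
# is reshaped into {"id": {"type": "pk"}, "name": {"type": "string"}, "_header": ["id", "name"]},
# assuming D.DDL_FIELD == "field" and B.DATA_NODE_HEADER == "_header".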
def getDbAttributes(job, comp, table):
"""
    this function collects all relevant db-attributes from any location where they can be set.
    The location could be
    * comp.artifact.db.[table].attr resp. comp.artifact.db.attr
    * comp.conn.db.[table].attr resp. comp.conn.db.attr resp. comp.conn.attr
"""
out = {
B.ATTR_DB_DATABASE: "",
B.ATTR_DB_SCHEMA: "",
B.ATTR_DB_TABNAME: "",
B.ATTR_DB_USER: "",
B.ATTR_DB_PASSWD: "",
B.ATTR_DB_HOST: "",
B.ATTR_DB_PARTITION: D.DEFAULT_DB_PARTITION,
B.ATTR_DB_CONN_JAR: D.DEFAULT_DB_CONN_JAR
}
for attr in out.keys():
if comp is None or not hasattr(comp, "conf"):
print("dbi ohne comp")
if B.TOPIC_NODE_DB not in job.conf:
raise Exception("Keine DB-Attribute in job")
if (attr in job.conf[B.TOPIC_NODE_DB]):
out[attr] = job.conf[B.TOPIC_NODE_DB][attr]
else:
            if (B.SUBJECT_ARTS in comp.conf and B.TOPIC_NODE_DB in comp.conf[B.SUBJECT_ARTS]) \
                    and (table in comp.conf[B.SUBJECT_ARTS][B.TOPIC_NODE_DB]) \
                    and (attr in comp.conf[B.SUBJECT_ARTS][B.TOPIC_NODE_DB][table]):
                out[attr] = comp.conf[B.SUBJECT_ARTS][B.TOPIC_NODE_DB][table][attr]
                print("a " + attr + " " + str(out[attr]))
            elif (B.SUBJECT_ARTS in comp.conf and B.TOPIC_NODE_DB in comp.conf[B.SUBJECT_ARTS]) \
                    and (attr in comp.conf[B.SUBJECT_ARTS][B.TOPIC_NODE_DB]):
                out[attr] = comp.conf[B.SUBJECT_ARTS][B.TOPIC_NODE_DB][attr]
                print("b " + attr + " " + str(out[attr]))
            elif (B.TOPIC_NODE_DB in comp.conf[B.SUBJECT_CONN]) \
                    and (table in comp.conf[B.SUBJECT_CONN][B.TOPIC_NODE_DB]) \
                    and (attr in comp.conf[B.SUBJECT_CONN][B.TOPIC_NODE_DB][table]):
                out[attr] = comp.conf[B.SUBJECT_CONN][B.TOPIC_NODE_DB][table][attr]
                print("c " + attr + " " + str(out[attr]))
            elif (B.TOPIC_NODE_DB in comp.conf[B.SUBJECT_CONN]) \
                    and (attr in comp.conf[B.SUBJECT_CONN][B.TOPIC_NODE_DB]):
                out[attr] = comp.conf[B.SUBJECT_CONN][B.TOPIC_NODE_DB][attr]
                print("d " + attr + " " + str(out[attr]))
            elif (attr in comp.conf[B.SUBJECT_CONN]):
                out[attr] = comp.conf[B.SUBJECT_CONN][attr]
                print("e " + attr + " " + str(out[attr]))
            else:
                print("f " + attr + " " + str(out[attr]))
return out
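# The lookup order above (debug-markers a-f), from most to least specific:
#   a) comp.conf[arts][db][table][attr]   b) comp.conf[arts][db][attr]
#   c) comp.conf[conn][db][table][attr]   d) comp.conf[conn][db][attr]
#   e) comp.conf[conn][attr]              f) the default from the initialisation of out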
def getStringIndex(text, intern):
    """ returns the index of intern in text, or the sentinel 999 if it is not contained """
if intern in text:
return text.index(intern)
return 999
def parseCondition(condition):
    """ parses a single condition into its parts [ operator, field, attribute ] """
    out = []
    for operator in [">=", "<=", ">", "<", "=", " like ", " in "]:
        if operator in condition.lower():
            i = condition.lower().index(operator)
field = condition[0:i]
attr = condition[i+len(operator):]
out = [operator.strip(), field.strip(), attr.strip()]
return out
return out
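# Illustrative calls (field names and values invented):
#   parseCondition("id >= 100")           ->  [">=", "id", "100"]
#   parseCondition("family like 'San%'")  ->  ["like", "family", "'San%'"]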
def parseConditions(condition):
""" the functions parses the condition into their syntactical parts:
parts = [ [ conjunctor , [ operator , field, attribute ], ... ]
the condition should be a simple normalform either with "and" or with "or"
"""
rest = condition
dbwhere = []
if (" and " in rest and " or " in rest):
raise Exception("the sql-condition must contain only \"ands\" or \"ors\" "+rest)
while (" and " in rest or " or " in rest):
iand = getStringIndex(rest, " and ")
ior = getStringIndex(rest, " or ")
print("conjunctors " + str(iand) + " " + str(ior))
        if iand == 999 and ior == 999:
            break
elif iand < ior:
conjunctor = "and"
condition = rest[0:iand]
rest = rest[iand + 5:]
cond = parseCondition(condition)
cond.append(conjunctor)
dbwhere.append(cond)
elif iand > ior:
conjunctor = "or"
condition = rest[0:ior]
rest = rest[ior + 4:]
cond = parseCondition(condition)
cond.append(conjunctor)
dbwhere.append(cond)
cond = parseCondition(rest)
cond.append("end")
dbwhere.append(cond)
return dbwhere
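# Illustrative call (field names and values invented):
#   parseConditions("state = 'A' and id > 10")
#   ->  [["=", "state", "'A'", "and"], [">", "id", "10", "end"]]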
def parseSQLwhere(condition, ddl=None):
    """ builds a sql-WHERE-clause from the condition, restricted to fields contained in the ddl-header """
    parts = parseConditions(condition)
conjunctor = ""
dbwhere = ""
for cond in parts:
print("exp "+cond[1]+" "+str(ddl))
        if ddl is not None and B.DATA_NODE_HEADER in ddl and cond[1] in ddl[B.DATA_NODE_HEADER]:
dbwhere += " "+conjunctor+" "+cond[1]+" "+cond[0]+" "+cond[2]
conjunctor = cond[3]
print("exp ok")
return "WHERE "+dbwhere.strip()
def getSqlTable(job, comp, table):
"""
    the function gets the technical tablename including necessary schema information
    :param job:
    :param comp:
:param table:
:return:
"""
attr = getDbAttributes(job, comp, table)
if attr[B.ATTR_DB_TABNAME] != "":
sqltable = attr[B.ATTR_DB_TABNAME]
else:
sqltable = table
if attr[B.ATTR_DB_SCHEMA] != "":
sqltable = attr[B.ATTR_DB_SCHEMA] + "." + sqltable
return sqltable
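# Illustrative result (attribute values invented): with db.schema = "testdb" and no
# db.tabname set, getSqlTable(job, comp, "customer") returns "testdb.customer".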
def getTechnicalIDFields(ddl):
    """ collects the fields which are marked as technical keys ("T..") in the key-attribute of the ddl """
    ids = []
keys = {}
for f in ddl:
if f[0:1] == "_":
continue
if "T" in ddl[f][D.DDL_KEY]:
keys[ddl[f][D.DDL_KEY]] = f
for k in keys:
ids.append(keys[k])
return ids
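# Illustrative example (field names invented): for a ddl where field "cid" has key "T1"
# and field "cdate" has key "T2", getTechnicalIDFields(ddl) yields ["cid", "cdate"];
# fields starting with "_" are skipped.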
def isConditionInFields(cond, ddl):
a = cond.split(" ")
for x in a:
if x in ddl:
return True
return False
def formatDbRows(table, comp, rows):
out = []
fields = comp.conf[B.DATA_NODE_DDL][table]
header = comp.conf[B.DATA_NODE_DDL][table][B.DATA_NODE_HEADER]
for r in rows:
ro = {}
for f in header:
if f in r:
val = formatDbField(comp, r[f], fields[f])
ro[f] = val
else:
val = formatDbField(comp, B.SVAL_NULL, fields[f])
ro[f] = val
out.append(ro)
return out
def formatDbField(comp, val, field):
if val == B.SVAL_NULL:
if field[D.DDL_CONSTRAINT] != B.SVAL_YES:
comp.m.logError("must-field is null " + field[D.DDL_FIELD])
return None
print("formatDbField "+str(comp))
print("formatDbField "+str(field)+" , "+str(val))
return formatDbVal(comp.m, val, field[D.DDL_TYPE])
def formatDbVal(msg, val, dtyp):
ctlg = basic.catalog.Catalog.getInstance()
if dtyp == D.TYPE_STRING or dtyp == D.TYPE_STR:
if not isinstance(val, str):
msg.logError("field must be " + dtyp + ", " + str(val))
if isinstance(val, dict):
val = json.dumps(val)
if isinstance(val, str):
            val = val.replace("\"", "\\\"")
            val = val.replace("\'", "\\\'")
return str(val)
if dtyp == D.TYPE_DATE:
if not isinstance(val, str):
msg.logError("field must be " + dtyp + ", " + str(val))
return tools.date_tool.getFormatDatetupel(tools.date_tool.parseDate(val), tools.date_tool.F_DB_DATE)
if dtyp == D.TYPE_TIME:
if not isinstance(val, str):
msg.logError("field must be " + dtyp + ", " + str(val))
return tools.date_tool.getFormatDatetupel(tools.date_tool.parseDate(val), tools.date_tool.F_DB_TIME)
if dtyp in [D.TYPE_INT, D.TYPE_PK]:
        if not (isinstance(val, int) or re.match(r"^\d+$", str(val))):
msg.logError("field must be " + dtyp + ", " + str(val))
return 0
return int(val)
    if dtyp == D.TYPE_FLOAT:
        if not (isinstance(val, float) or re.match(r"^\d+[.,]\d+$", str(val))):
            msg.logError("field must be " + dtyp + ", " + str(val))
            return 0
        return float(str(val).replace(",", "."))
    msg.logError("unknown data-type " + str(dtyp))
    return str(val)
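# Illustrative conversions (values invented):
#   formatDbVal(msg, "42", D.TYPE_INT)      ->  42
#   formatDbVal(msg, "3,14", D.TYPE_FLOAT)  ->  3.14
#   formatDbVal(msg, "abc", D.TYPE_INT)     ->  0  (an error is logged)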
def isCompTable(comp, job, data, table):
""" checks if the table in data relates to the component """
print(str(data))
return isCompRow(comp, data[B.DATA_NODE_TABLES][table])
def isCompRow(comp, row):
""" checks if the table in data relates to the component """
print("isCompRow "+comp.name+" "+str(row))
if comp.name in row[B.ATTR_DATA_COMP] \
and row[B.ATTR_DATA_COMP][comp.name] in comp.conf[B.SUBJECT_ARTS][B.TOPIC_NODE_DB]:
return True
return False
# ---------------------------------------------------------------------------------------------------------------
class DbFcts():
"""
This interface defines each necessary connection to any kind of database.
The specific technique how to connect to the concrete DBMS has to be implemented in the specific tool.
"""
    def __init__(self):
        self.job = None
        self.comp = None
def setComp(self, job, comp):
self.job = job
self.comp = comp
def getDbAttributes(self, job, table):
"""
        this function collects all relevant db-attributes from any location where they can be set.
        The location could be
        * comp.artifact.db.[table].attr resp. comp.artifact.db.attr
        * comp.conn.db.[table].attr resp. comp.conn.db.attr resp. comp.conn.attr
"""
return getDbAttributes(job, self.comp, table)
def selectTables(self, subdir, job):
""" method to delete rows from a database
statement written in sql """
self.loadDdl(job)
tdata = {}
tdata[subdir] = {}
for t in self.comp.conf[B.DATA_NODE_DDL]:
tdata[subdir][t] = self.selectRows(t, job)
if B.DATA_NODE_DATA not in tdata[subdir][t]:
raise Exception("missing data node in table")
tdata[subdir][t][D.DATA_ATTR_COUNT] = len(tdata[subdir][t][B.DATA_NODE_DATA])
tdata[subdir][t][D.DATA_ATTR_DATE] = tools.date_tool.getActdate(tools.date_tool.F_DE)
self.comp.m.logMsg("Tabelle {} mit {} Zeilen gelesen".format(t, len(tdata[subdir][t][B.DATA_NODE_DATA])))
return tdata
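    # The returned structure has the form (table name invented):
    #   {subdir: {"customer": {"_data": [...], <D.DATA_ATTR_COUNT>: n, <D.DATA_ATTR_DATE>: "..."}}}
    # assuming B.DATA_NODE_DATA == "_data".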
def selectRows(self, table, job):
""" method to select rows from a database
statement written in sql """
raise Exception(B.EXCEPT_NOT_IMPLEMENT)
def reset_TData(self, job):
self.deleteTables(job)
def deleteTables(self, job):
""" method to delete rows from a database
statement written in sql """
self.loadDdl(job)
for t in self.comp.conf[B.DATA_NODE_DDL]:
print("zu loeschende Tabelle "+t)
self.deleteRows(job, t)
def deleteRows(self, job, table):
""" method to delete rows from a database
statement written in sql """
raise Exception(B.EXCEPT_NOT_IMPLEMENT)
def updateRows(self, statement, job):
""" method to delete rows from a database
statement written in sql """
raise Exception(B.EXCEPT_NOT_IMPLEMENT)
def getConnector(self):
""" add-on-method to get the connector
this method should only called by the class itself """
raise Exception(B.EXCEPT_NOT_IMPLEMENT)
def insertTables(self, tdata, job):
"""
method to insert rows into the database of the component
"""
        # TODO is the tablename/db/schema stored under tdata?
plainname = basic.componentHandling.getPlainCompname(self.comp.name)
self.loadDdl(job)
for t in tdata[B.DATA_NODE_TABLES]:
print("einzufuegende Tabelle "+self.comp.name+" "+t)
if isCompTable(self.comp, job, tdata, t):
self.insertRows(job, t, tdata[B.DATA_NODE_TABLES][t][B.DATA_NODE_DATA])
self.comp.m.logMsg("in Tabelle {} {} Zeilen eingefuegt".format(
t, len(tdata[B.DATA_NODE_TABLES][t][B.DATA_NODE_DATA])))
def insertRows(self, job, table, rows):
""" method to insert rows into a database
the rows will be interpreted by the ddl of the component
"""
raise Exception(B.EXCEPT_NOT_IMPLEMENT)
def execStatement(self, statement, conn=None):
""" add-on-method to execute the statement
this method should only called by the class itself """
raise Exception(B.EXCEPT_NOT_IMPLEMENT)
def loadDdl(self, job):
"""" load the DDL for each database-table
the ddl are mostly stored as csv in the component-folder """
if (B.DATA_NODE_DDL in self.comp.conf):
return
conf = tools.config_tool.getConfig(job, D.DDL_FILENAME, self.comp.name)
self.comp.conf[B.DATA_NODE_DDL] = {}
for k in conf[self.comp.name]:
self.comp.conf[B.DATA_NODE_DDL][k] = conf[self.comp.name][k]
return conf[self.comp.name]
def getWhere(self):
return ""
def getOrder(self):
return ""
def getDbValue(self, fo, pvalue):
value = str(formatDbField(self.comp, pvalue, fo))
if len(value.strip()) == 0:
if D.DDL_CONSTRAINT not in fo or fo[D.DDL_CONSTRAINT] == B.SVAL_YES:
return self.getDbNull()
if fo[D.DATA_NODE_TYPE] == D.TYPE_STRING or fo[D.DATA_NODE_TYPE] == D.TYPE_STR:
return "\'"+value.strip()+"\'"
elif fo[D.DATA_NODE_TYPE] in [D.TYPE_INT, D.TYPE_PK]:
return value.strip()
elif fo[D.DATA_NODE_TYPE] == D.TYPE_DOUBLE:
return self.getDbDouble(value)
elif fo[D.DATA_NODE_TYPE] == D.TYPE_FLOAT:
return self.getDbFloat(value)
        elif fo[D.DATA_NODE_TYPE] == D.TYPE_DATE:
            return self.getDbDate(value)
        elif fo[D.DATA_NODE_TYPE] == D.TYPE_TIME:
            return self.getDbTime(value)
        return value.strip()
def getDbDouble(self, value):
return value
def getDbFloat(self, value):
return value
def getDbDate(self, value):
return value
def getDbTime(self, value):
return value
def getDbNull(self):
return B.SVAL_NULL
def getSchemaAttribut(self, attr, atype):
raise Exception(B.EXCEPT_NOT_IMPLEMENT)
def getSchemaSubtable(self, parent, attr, atype):
raise Exception(B.EXCEPT_NOT_IMPLEMENT)
def getSchemaIndex(self, attr):
raise Exception(B.EXCEPT_NOT_IMPLEMENT)
def getSubTableName(self, parent, attr):
if "_"+attr in parent:
return parent
return parent+"_"+attr
def getSubTableId(self, parent, attr):
pabr = parent.split("_")[0]
if pabr == "idx":
pabr = parent.split("_")[1]
return pabr+attr[0:3]+"id"
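    # Illustrative results (names invented): getSubTableId("order", "person") -> "orderperid";
    # for an index-table "idx_order" the part after "idx" is used, so the result is the same.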
def getIndexName(self, table, attr):
if attr in table:
return "idx_"+table
return "idx_"+table+"_"+attr
def getInsertFields(self, ddl):
header = []
for f in ddl[B.DATA_NODE_HEADER]:
if D.DDL_TYPE in ddl[f] and ddl[f][D.DDL_TYPE] == D.TYPE_PK:
continue
header.append(f)
return header
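    # Illustrative example (field names invented): for a ddl with header ["id", "name"]
    # where "id" has type "pk", getInsertFields(ddl) returns ["name"] - primary keys are
    # skipped, presumably because the database generates them.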