@@ -26,6 +26,7 @@ import basic.program
import utils.file_tool
import basic.constants as B
import utils.data_const as D
import utils.date_tool
TOOL_NAME = "tdata_tool"
""" name of the tool in order to switch debug-info on """
@@ -71,13 +72,24 @@ def getTestdata():
        # read file in testdata
        job.m.logInfo("Test-Data read from " + tdata[D.ATTR_SRC_TYPE] + " for " + tdata[D.ATTR_SRC_NAME])
    elif tdata[D.ATTR_SRC_TYPE] == D.DATA_SRC_DIR:
        filename = os.path.join(job.conf.getJobConf(B.SUBJECT_PATH + ":" + B.D.ATTR_PATH_TDATA), tdata[D.ATTR_SRC_NAME], "testspec.csv")
        path = os.path.join(job.conf.getJobConf(B.SUBJECT_PATH + ":" + B.ATTR_PATH_TDATA), tdata[D.ATTR_SRC_NAME])
        filename = os.path.join(path, "testspec.csv")
        data = getCsvSpec(job.m, filename, D.CSV_SPECTYPE_DATA)
        for k in data:
            tdata[k] = data[k]
            if (k == D.CSV_BLOCK_OPTION):
                for p in data[k]:
                    setattr(job.par, p, data[k][p])
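        # Each file found by the "table_" prefix presumably holds one table;
        # the slice f[6:-4] below strips "table_" and ".csv", so a file named
        # "table_person.csv" (illustrative) yields the table name "person".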
        files = utils.file_tool.getFiles(job.m, path, "table_", None)
        for f in files:
            print(f)
            filename = os.path.join(path, f)
            data = readCsv(job.m, filename, None)
            table = f[6:-4]
            print(filename + " " + table)
            if B.DATA_NODE_TABLES not in tdata:
                tdata[B.DATA_NODE_TABLES] = {}
            tdata[B.DATA_NODE_TABLES][table] = data[B.DATA_NODE_TABLES][table]
    else:
        job.m.setFatal("test-Data: reftyp " + tdata[D.ATTR_SRC_TYPE] + " is not implemented")
    return tdata
@@ -110,7 +122,7 @@ def parseCsvSpec(msg, lines, type):
    header = []
    h = []  # from a[]
    status = "start"
    tableDate = utils.date_tool.getActdate(utils.date_tool.F_DE)
    tableDict = {}
    for l in lines:
        print("lines " + l)
@@ -129,11 +141,24 @@ def parseCsvSpec(msg, lines, type):
            if (not B.DATA_NODE_STEPS in data):
                data[B.DATA_NODE_STEPS] = []
            step = {}
            step[B.DATA_NODE_COMP] = fields[1]
            step[B.ATTR_DATA_REF] = fields[2]
            step[B.DATA_NODE_COMP] = fields[D.STEP_COMP_I]
            step[B.ATTR_EXEC_REF] = fields[D.STEP_EXECNR_I]
            step[B.ATTR_DATA_REF] = fields[D.STEP_REFNR_I]
            step[B.ATTR_STEP_ARGS] = {}
            a = fields[3].split(",")
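            # If the argument column coincides with the start of the open-ended
            # list range (STEP_ARGS_I == STEP_LIST_I), the remaining cells are
            # joined into one comma-separated string; empty cells and cells
            # starting with "#" are skipped as comments.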
            if D.STEP_ARGS_I == D.STEP_LIST_I:
                args = ""
                for i in range(D.STEP_ARGS_I, len(fields)):
                    if len(fields[i]) < 1:
                        continue
                    if fields[i][0:1] == "#":
                        continue
                    args += "," + fields[i]
                args = args[1:]
            else:
                args = fields[D.STEP_ARGS_I]
            a = args.split(",")
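            # Each argument is expected as a "key:value" pair, e.g. "cnt:3"
            # (illustrative); anything without a colon raises EXCP_MALFORMAT.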
            for arg in a:
                print("arg " + arg)
                b = arg.split(":")
                if len(b) < 2:
                    raise Exception(D.EXCP_MALFORMAT + " " + l)
@@ -145,12 +170,17 @@ def parseCsvSpec(msg, lines, type):
                raise Exception(D.EXCP_MALFORMAT + " " + l)
            data[a[0]][a[1]] = fields[1]
            continue
        elif a[0].lower() == D.DATA_ATTR_DATE:
            tableDate = fields[1]
        elif (a[0].lower() in D.CSV_HEADER_START):
            # create deep structure a_0 ... a_n
            print("tdata 136 CSV_HEADER_START " + str(len(a)))
            h = a
            data[B.DATA_NODE_TABLES] = {}
            header = []
            if B.DATA_NODE_TABLES not in data:
                data[B.DATA_NODE_TABLES] = {}
            h[0] = B.DATA_NODE_TABLES
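            # h now holds the node path below B.DATA_NODE_TABLES, so a header
            # cell like "table:person" (illustrative) is resolved by
            # getTabContent(msg, data, h) to the dict for that table.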
            comps = {}
            tableDict = getTabContent(msg, data, h)
            i = 0
            for f in fields:
@@ -171,6 +201,7 @@ def parseCsvSpec(msg, lines, type):
                    headerFields = []
            else:
                tableDict[B.DATA_NODE_DATA] = []
            tableDict[D.DATA_ATTR_DATE] = tableDate
            setTabContent(msg, data, tableDict, h)
            status = D.CSV_SPECTYPE_DATA
            continue
@@ -179,14 +210,25 @@ def parseCsvSpec(msg, lines, type):
            # fill data
            tableDict = getTabContent(msg, data, h)
            row = {}
            print(fields)
            i = 1
            # case-differentiation DATA or TREE
            for f in header:
                print(str(i) + " " + str(len(fields)) + " " + str(len(header)))
                row[f] = fields[i]
                if type == D.CSV_SPECTYPE_TREE:
                    tableDict[B.DATA_NODE_DATA][f] = fields[i]
                i += 1
            if type == D.CSV_SPECTYPE_DATA:
                print("parseSpec " + str(fields[0]))
                row[B.ATTR_DATA_COMP] = {}
                for c in fields[0].split(","):
                    a = c.split(":")
                    print("parseSpec " + str(a))
                    comps[a[0]] = a[1]
                    row[B.ATTR_DATA_COMP][a[0]] = a[1]
                #row[B.ATTR_DATA_COMP] = fields[0].split(",")
                tableDict[B.ATTR_DATA_COMP] = comps
                tableDict[B.DATA_NODE_DATA].append(row)
            elif type == D.CSV_SPECTYPE_KEYS:
                tableDict[D.CSV_NODETYPE_KEYS][fields[1]] = row
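            # Summary of the branches above: CSV_SPECTYPE_DATA appends the row to
            # B.DATA_NODE_DATA, CSV_SPECTYPE_TREE mirrors the fields into a nested
            # dict, and CSV_SPECTYPE_KEYS indexes the row by its key column fields[1].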
@@ -205,6 +247,13 @@ def parseCsvSpec(msg, lines, type):
    return data
def mergeTableComponents(comps, rowComps):
    for c in rowComps.split(","):
        a = c.split(":")
        comps[a[0]] = a[1]
    return comps
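# Presumably accumulates "comp:value" pairs over all rows, e.g.
# mergeTableComponents({}, "crm:1,erp:2") -> {"crm": "1", "erp": "2"}
# (component names illustrative).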
def setTabContent(msg, data, tabledata, path):
    if len(path) >= 2 and path[1] not in data[path[0]]:
        data[path[0]][path[1]] = {}
@@ -238,13 +287,15 @@ def getTabContent(msg, data, path):
def readCsv(msg, filename, comp, aliasNode=""):
    lines = utils.file_tool.readFileLines(filename, msg)
    print("readCsv " + filename)
    print(lines)
    return parseCsv(msg, filename, lines, comp, aliasNode)
def parseCsv(msg, filename, lines, comp, aliasNode=""):
    job = basic.program.Job.getInstance()
    verify = -4 + job.getDebugLevel(TOOL_NAME)
    job.debug(verify, "######## parseCsv " + filename + " : " + comp.name + " : " + str(lines))
    job.debug(verify, "######## parseCsv " + filename + " : " + str(lines))
    fields = []
    nodes = []
    columns = []
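    # Parser states as inferred from the branches below: 0 = nothing read yet,
    # 1 = table attributes (date/count) read, 2 = header line read, 3 = data rows.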
@ -264,14 +315,22 @@ def parseCsv(msg, filename, lines, comp, aliasNode=""):
job . debug ( verify , str ( state ) + " line " + line + " : " + str ( len ( fields ) ) + " : " + str ( fields ) )
if len ( testline ) < 2 and state < 1 :
state = 0
elif a [ 0 ] . lower ( ) == D . ATTR_TABLE _DATE :
elif a [ 0 ] . lower ( ) == D . DATA_ ATTR_DATE:
tableDate = fields [ 1 ]
elif a [ 0 ] . lower ( ) == D . ATTR_TABLE_CNT :
state = 1
elif a [ 0 ] . lower ( ) == D . DATA_ATTR_COUNT :
tableCnt = fields [ 1 ]
elif a [ 0 ] . lower ( ) in D . CSV_HEADER_START :
state = 1
elif a [ 0 ] . lower ( ) in D . CSV_HEADER_START or \
( comp is not None and state == 1
and isCompTableFile ( comp , filename ) ) :
state = 2
columns = [ ]
h = a
if len ( h ) < 2 and comp is not None :
a = [ " table " , basename ]
h = a
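            # Fallback (assumption): a component table file without a qualified
            # header line is registered under ["table", basename], i.e. the base
            # filename serves as the table name.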
            startCols = 0
            cnt = len(fields)
            job.debug(verify, str(state) + " cnt " + str(cnt))
            data[B.DATA_NODE_TABLES] = {}
@ -287,10 +346,11 @@ def parseCsv(msg, filename, lines, comp, aliasNode=""):
nodes . append ( h [ i ] )
job . debug ( verify , str ( state ) + " nodes " + str ( nodes ) )
tableDict = getTabContent ( msg , data , h )
tableDict [ B . ATTR_DATA_COMP ] = { }
if len ( tableDate ) > 6 :
tableDict [ D . ATTR_TABLE _DATE ] = tableDate
tableDict [ D . DATA_ ATTR_DATE] = tableDate
if int ( tableCnt ) > 0 :
tableDict [ D . ATTR_TABLE_ CNT ] = tableCnt
tableDict [ D . DATA_ ATTR_COU NT] = tableCnt
j = 0
for i in range ( 1 , cnt ) :
if fields [ i ] [ 0 : 1 ] == " _ " :
@ -308,7 +368,14 @@ def parseCsv(msg, filename, lines, comp, aliasNode=""):
tableDict = getTabContent ( msg , data , h )
state = 3
row = { }
print ( line )
if startCols > 0 :
row [ B . ATTR_DATA_COMP ] = { }
row [ B . ATTR_DATA_COMP ] [ a [ 0 ] ] = a [ 1 ]
tableDict [ B . ATTR_DATA_COMP ] [ a [ 0 ] ] = a [ 1 ]
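            # startCols apparently counts leading meta columns (e.g. the "comp"
            # column); the data cells are shifted right by that offset below.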
            for i in range(startCols, cnt + startCols):
                print("for " + str(i) + " " + str(len(row)) + " " + str(startCols) + " " + str(len(fields)))
                print(str(fields[i]))
                if i >= len(columns) + startCols:
                    break
                row[columns[i - startCols]] = fields[i]
@@ -358,25 +425,38 @@ def buildCsvData(filename, tdata, comp):
    :param comp: if specific else None
    :return:
    """
    compColumn = not isCompTableFile(comp, filename)
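    # compColumn is False for a component-specific file: the leading "table;"
    # prefix is then dropped from header and rows (header[6:] and row[1:] below).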
    job = basic.program.Job.getInstance()
    verify = -1 + job.getDebugLevel(TOOL_NAME)
    job.debug(verify, "writeDataTable " + str(comp))
    text = ""
    for k in [D.ATTR_TABLE_DATE, D.ATTR_TABLE_CNT]:
    for k in [D.DATA_ATTR_DATE, D.DATA_ATTR_COUNT]:
        if k in tdata:
            text += k + ";" + tdata[k] + "\n"
    text += "table"
            text += k + ";" + str(tdata[k]) + "\n"
    header = "table"
    for f in tdata[B.DATA_NODE_HEADER]:
        text += ";" + f
        header += ";" + f
    if compColumn:
        text += header
    else:
        #text += "_nr;" + header[6:] + "\n"
        text += header[6:] + "\n"
    i = 0
    for r in tdata[B.DATA_NODE_DATA]:
        text += "\n"
        row = ""
        i += 1
        for f in tdata[B.DATA_NODE_HEADER]:
            if f in r:
                text += ";" + str(r[f])
                row += ";" + str(r[f])
            else:
                text += ";"
        text += "\n"
    return text
                row += ";"
        if compColumn:
            text += row
        else:
            text += row[1:]
            #text += str(i) + row
        text += "\n"
    return text
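# A generated block might look like this (keys and values illustrative):
#   date;01.01.2022
#   count;2
#   table;id;name
#   ;1;Alpha
#   ;2;Beta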
def writeCsvData(filename, tdata, comp):
@@ -386,3 +466,14 @@ def writeCsvData(filename, tdata, comp):
            text += buildCsvData(filename, tdata[B.DATA_NODE_TABLES][k], comp)
            text += "\n"
    utils.file_tool.writeFileText(comp.m, filename, text)
def isCompTableFile(comp, filename):
    """ check if the filename belongs to the component """
    basetable = os.path.basename(filename)[0:-4]
    if comp is None:
        return False
    if B.TOPIC_NODE_DB in comp.conf[B.SUBJECT_ARTS] and basetable in comp.conf[B.DATA_NODE_DDL] \
            and comp.name in filename:
        return True
    return False
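# Reading of the checks above: the component must have a DB artifact, the file's
# base name must appear in the component's DDL definitions, and the component
# name must occur in the file path (paraphrasing the condition).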