Data-Test-Executer Framework, specifically for testing data-processing pipelines: data generation, system preparation, data loading, and a holistic, diversifying comparison of results
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------------------------------------
# Author : Ulrich Carmesin
# Source : gitea.ucarmesin.de
# ---------------------------------------------------------------------------------------------------------
"""
This class is a technical implementation for Hive-connection with spark - typically used in a
Machine Learning environment for example in hadoop
"""
import json
import os
import basic.program
import utils.config_tool
import utils.db_abstract
import pyspark
from pyspark.sql import SparkSession
import basic.constants as B


class DbFcts(utils.db_abstract.DbFcts):
    """
    Spark/Hive implementation of the database interface defined in utils.db_abstract.
    The specific technique how to connect to this concrete DBMS is implemented here.
    """
    def __init__(self):
        pass
    def selectRows(self, table):
        """ method to select rows from a database,
        statement written in sql """
        tdata = {}
        dry = 0  # 1 = dry run: only print the statements instead of executing them
        # attr = self.getDbAttributes(table)
        job = basic.program.Job.getInstance()
        verify = -1 + job.getDebugLevel("db_tool")
        cmd = "SELECT " + ",".join(self.comp.conf[B.DATA_NODE_DDL][table][B.DATA_NODE_HEADER])
        cmd += " FROM " + table
        sqls = self.comp.composeSqlClauses(cmd)
        data = []
        for k in sqls.keys():
            sql = sqls[k]
            if dry == 1:
                # dry run: show the statement only
                print("select " + sql)
            else:
                spark = self.getConnector()
                df = spark.sql(sql)
                dfj = df.toJSON()
                for r in dfj.collect():
                    data.append(json.loads(r))
            self.comp.m.logInfo(sql)
        tdata[B.DATA_NODE_HEADER] = self.comp.conf[B.DATA_NODE_DDL][table][B.DATA_NODE_HEADER]
        tdata[B.DATA_NODE_DATA] = data
        return tdata
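    # Illustrative result shape (hypothetical table "customer" with the DDL header
    # ["id", "name"]): selectRows builds "SELECT id,name FROM customer" plus the
    # clauses added by composeSqlClauses, and returns a dict like
    #   {<header-node>: ["id", "name"], <data-node>: [{"id": 1, "name": "A"}, ...]}
    # where the node keys are the constants B.DATA_NODE_HEADER and B.DATA_NODE_DATA.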
    def deleteRows(self, table):
        """ method to delete rows from a database,
        statement written in sql """
        job = basic.program.Job.getInstance()
        dry = 0  # 1 = dry run: only print the statements instead of executing them
        verify = -1 + job.getDebugLevel("db_tool")
        cmd = "DELETE FROM " + table
        sqls = self.comp.composeSqlClauses(cmd)
        print("deleteRows " + cmd)
        for k in sqls.keys():
            sql = sqls[k]
            if dry == 1:
                # dry run: show the statement only
                print("delete " + sql)
            else:
                spark = self.getConnector()
                spark.sql(sql)
            self.comp.m.logInfo(sql)
    def insertRows(self, table, rows):
        """ method to insert rows into a database;
        the rows will be interpreted by the ddl of the component
        """
        job = basic.program.Job.getInstance()
        verify = -1 + job.getDebugLevel("db_tool")
        spark = self.getConnector()
        df = spark.createDataFrame(rows)
        # TODO: writing the DataFrame into the target table is not implemented yet
        self.comp.m.logInfo("insertRows " + table)
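    # A minimal sketch of how the created DataFrame could be persisted, assuming the
    # Spark session is Hive-enabled and `table` names an existing Hive table; the
    # calls below are standard PySpark DataFrameWriter methods, not framework code:
    #   df.write.mode("append").insertInto(table)      # append rows into the existing table
    # or, to (re)create the table from the frame's schema:
    #   df.write.mode("overwrite").saveAsTable(table)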
    def getConnector(self):
        """ add-on method to get the connector;
        this method should only be called by the class itself """
        job = basic.program.Job.getInstance()
        attr = self.getDbAttributes("null")
        spark = None
        if B.ATTR_DB_CONN_JAR in attr:
            # the attribute names an environment variable that holds the path of the connector jar
            connectorJar = os.environ.get(attr[B.ATTR_DB_CONN_JAR])
            spark = SparkSession.builder \
                .appName("datest") \
                .config("spark.jars", f"{connectorJar}") \
                .getOrCreate()
        return spark
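

# Usage sketch (illustrative only; how the component `comp` is wired onto the
# instance follows the framework's conventions and is assumed here, not shown in
# this file):
#   job = basic.program.Job.getInstance()   # the job context must be initialized first
#   fcts = DbFcts()
#   fcts.comp = comp                         # hypothetical: component carrying conf, ddl, messaging
#   tdata = fcts.selectRows("customer")      # "customer" is a hypothetical table name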