model('en', if_exists='ignore')
pretrained_model = fasttext.load_model('cc.en.300.bin')
os.chdir(cwd)
self.profiler.set_params(text_process__vectorizer__external_model = pretrained_model)
self.profiler.set_params(text_process__vectorizer__external_model_type = 'binary')
"""
return code.replace('\\n', '\\n'+(indent * TAB_CHAR))
def __profiler_main_code(params, indent=2):
code = f"""
df = self.profiler.transform(df)
columns = {params['output_features']}
if isinstance(df, scipy.sparse.spmatrix):
df = pd.DataFrame(df.toarray(), columns=columns)
else:
df = pd.DataFrame(df, columns=columns)
return df
"""
return code.replace('\\n', '\\n'+(indent * TAB_CHAR))
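# Generates the source of a minimal `selector` class whose run() simply subsets
# the dataframe to the configured output features; returns the code string
# together with the module list required to run it.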
def feature_selector_code( params, indent=0):
modules = [
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}
]
code = """
class selector():
# this class subsets the dataframe to the selected output features
def __init__(self):
pass
def run(self, df):"""
code +=f"""
return df[{params['output_features']}]
"""
return code, modules
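# Generates a `selector` class that loads a persisted feature-reduction model
# (e.g. PCA) with joblib from the deployed "model" folder and transforms the
# configured input features into the reduced output features.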
def feature_reducer_code( params, indent=0):
modules = [
{'module': 'joblib', 'mod_from': None, 'mod_as': None},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}
]
code = f"""
class selector():
def __init__(self):
reducer_file = (Path(__file__).parent/"model")/"{params['reducer_file']}"
if not reducer_file.exists():
raise ValueError(f'Failed to load Feature Engineering model file: {{reducer_file}}')
self.model = joblib.load(reducer_file)
def run(self, df):
reducer_input = {params['input_features']}
reducer_output = {params['output_features']}
df = self.model.transform(df[reducer_input])
return pd.DataFrame(df,columns=reducer_output)
"""
if indent:
code = code.replace('\\n', '\\n'+(indent * TAB_CHAR))
return code, modules
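# Writes featureslist.py into the deployment folder. The generated getfeatures()
# classifies each input feature from the profiler config as Target, Numeric,
# Text, Category or Unknown and returns the list as a JSON payload.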
def create_feature_list(config=None, target_feature=None, deploy_path=None):
featurelist = []
if 'profiler' in config:
if 'input_features_type' in config['profiler']:
input_features = config['profiler']['input_features_type']
for x in input_features:
featurelt={}
featurelt['feature'] = x
if x == target_feature:
featurelt['Type'] = 'Target'
else:
if input_features[x] in ['int','int64','float','float64']:
featurelt['Type'] = 'Numeric'
elif input_features[x] == 'object':
featurelt['Type'] = 'Text'
elif input_features[x] == 'category':
featurelt['Type'] = 'Category'
else:
featurelt['Type'] = 'Unknown'
featurelist.append(featurelt)
featurefile = f"""
import json
def getfeatures():
try:
features = {featurelist}
outputjson = {{"status":"SUCCESS","features":features}}
output = json.dumps(outputjson)
print("Features:",output)
return(output)
except Exception as e:
output = {{"status":"FAIL","message":str(e).strip(\\'"\\')}}
print("Features:",json.dumps(output))
return (json.dumps(output))
if __name__ == "__main__":
output = getfeatures()
"""
with open( deploy_path/'featureslist.py', 'wb') as f:
f.write( str(featurefile).encode('utf8'))
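# Writes requirements.txt pinned to the versions installed in the training
# environment, adding text-, model- and learner-specific packages on demand.
# Note: `version` is assumed here to be importlib.metadata.version (imported
# elsewhere), which returns the installed version string of a package.
# Hypothetical usage: requirement_file(Path('deploy'), 'Logistic Regression', [], 'ML')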
def requirement_file(deploy_path,model,textFeatures,learner_type='ML'):
modules = ['pandas','numpy','alibi','matplotlib','joblib','shap','ipython','category_encoders','scikit-learn','word2number','flask_restful','evidently','Flask-Cors']
requires = ''
for mod in modules:
requires += f"{mod}=={version(mod)}\\n"
if len(textFeatures) > 0:
tmodules = ['spacy','nltk','textblob','demoji','beautifulsoup4','text-unidecode','pyspellchecker','contractions','protobuf']
for mod in tmodules:
requires += f"{mod}=={version(mod)}\\n"
if model == 'Extreme Gradient Boosting (XGBoost)':
mmodules = ['xgboost']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model == 'Light Gradient Boosting (LightGBM)':
mmodules = ['lightgbm']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model == 'Categorical Boosting (CatBoost)':
mmodules = ['catboost']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model.lower() == 'arima':
mmodules = ['pmdarima']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model.lower() == 'fbprophet':
mmodules = ['prophet']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model.lower() == 'lstm' or model.lower() == 'mlp' or learner_type =='DL':
mmodules = ['tensorflow']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model.lower() in ['cox', 'kaplanmeierfitter']: #bug 12833
mmodules = ['lifelines']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model.lower() == 'sentencetransformer': #bug 12833
mmodules = ['sentence_transformers']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
with open( deploy_path/'requirements.txt', 'wb') as f:
f.write(str(requires).encode('utf8'))
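# Writes readme.txt describing the deployed package layout and showing how to
# invoke the prediction entry point from Windows and Linux terminals, including
# the expected success and error JSON outputs.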
def create_readme_file(deploy_path,modelfile,features):
data = json.dumps([{x:x+'_value'} for x in features])
backslash_data = data.replace('"', '\\\\"')
content = f"""
========== File Structure ==========
{modelfile} ------ Trained Model
aion_prediction.py --> Python package entry point
script/inputprofiler.py --> Profiling like FillNA and Category to Numeric
========== How to call the model ==========
============== From Windows Terminal ==========
python aion_prediction.py "{backslash_data}"
============== From Linux Terminal ==========
python aion_prediction.py "{data}"
============== Output ==========
{{"status":"SUCCESS","data":[{{"Data1":"Value","prediction":"Value"}}]}} ## for single Row/Record
{{"status":"SUCCESS","data":[{{"Data1":"Value","prediction":"Value"}},{{"Data1":"Value","prediction":"Value"}}]}} ## For Multiple Row/Record
{{"status":"ERROR","message":"description"}} ## In Case Exception or Error
"""
filename = deploy_path/'readme.txt'
with open(filename, 'w') as f:
f.write(content)
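# Copies bundled utility archives into the deployment folder: every .tar file
# under the sibling 'utilities' directory (except the scikit-surprise archives)
# is extracted into deploy_path.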
def create_util_folder(deploy_path):
import tarfile
ext_path = Path(__file__).parent.parent/'utilities'
for x in ext_path.iterdir():
if x.suffix == '.tar':
if x.name not in ['scikit_surprise-1.1.1.dist-info.tar','surprise.tar']:
my_tar = tarfile.open(x)
my_tar.extractall(deploy_path)
my_tar.close()
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import platform
import json
import shutil
import logging
class aionPrediction:
def __init__(self):
self.log = logging.getLogger('eion')
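# Builds the standalone prediction entry point as one long source string.
# The generated script loads inputprofiler.py, selector.py, trained_model.py and
# output_format.py dynamically via importlib and chains them:
# profile -> select -> predict -> format output.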
def create_optimus_prediction_file (self,classname,deploy_path,learner_type):
self.predictionFile = 'import warnings'
self.predictionFile += '\\n'
self.predictionFile += 'warnings.filterwarnings("ignore")'
self.predictionFile += '\\n'
self.predictionFile += 'import json'
self.predictionFile += '\\n'
self.predictionFile += 'import os'
self.predictionFile += '\\n'
self.predictionFile += 'import sys'
self.predictionFile += '\\n'
self.predictionFile += 'import pandas as pd'
self.predictionFile += '\\n'
self.predictionFile += 'from pandas import json_normalize'
self.predictionFile += '\\n'
self.predictionFile += 'from importlib import import_module'
self.predictionFile += '\\n'
self.predictionFile += 'import importlib.util'
self.predictionFile += '\\n'
self.predictionFile += 'class prediction:'
self.predictionFile += '\\n'
self.predictionFile += ' def predict_from_json(self,json_data):'
self.predictionFile += '\\n'
self.predictionFile += ' data = json.loads(json_data)'
self.predictionFile += '\\n'
self.predictionFile += ' output=self.predict(data)'
self.predictionFile += '\\n'
self.predictionFile += ' print("predictions:",output)'
self.predictionFile += '\\n'
self.predictionFile += '\\n'
self.predictionFile += ' def predict_from_file(self,filename):'
self.predictionFile += '\\n'
self.predictionFile += ' with open(filename,\\'r\\',encoding=\\'utf-8\\') as f:'
self.predictionFile += '\\n'
self.predictionFile += ' data = json.load(f)'
self.predictionFile += '\\n'
self.predictionFile += ' output=self.predict(data)'
self.predictionFile += '\\n'
self.predictionFile += ' print("predictions:",output)'
self.predictionFile += '\\n'
self.predictionFile += '\\n'
self.predictionFile += ' def predict(self,json_data):'
self.predictionFile += '\\n'
self.predictionFile += ' try:'
self.predictionFile += '\\n'
#self.predictionFile += ' jsonData = json.loads(json_data)'
self.predictionFile += ' jsonData=json_data'
self.predictionFile += '\\n'
self.predictionFile += ' model_obj = importlib.util.spec_from_file_location("module.name", os.path.dirname(os.path.abspath(__file__))+"/trained_model.py")'
self.predictionFile += '\\n'
self.predictionFile += ' model = importlib.util.module_from_spec(model_obj)'
self.predictionFile += '\\n'
self.predictionFile += ' model_obj.loader.exec_module(model)'
self.predictionFile += '\\n'
#if(learner_type != 'TextML'):
self.predictionFile += ' profiler_obj = importlib.util.spec_from_file_location("module.name", os.path.dirname(os.path.abspath(__file__))+"/inputprofiler.py")'
self.predictionFile += '\\n'
self.predictionFile += ' inputprofiler = importlib.util.module_from_spec(profiler_obj)'
self.predictionFile += '\\n'
self.predictionFile += ' profiler_obj.loader.exec_module(inputprofiler)'
self.predictionFile += '\\n'
self.predictionFile += ' selector_obj = importlib.util.spec_from_file_location("module.name", os.path.dirname(os.path.abspath(__file__))+"/selector.py")'
self.predictionFile += '\\n'
self.predictionFile += ' selector = importlib.util.module_from_spec(selector_obj)'
self.predictionFile += '\\n'
self.predictionFile += ' selector_obj.loader.exec_module(selector)'
self.predictionFile += '\\n'
self.predictionFile += ' output_format_obj = importlib.util.spec_from_file_location("module.name", os.path.dirname(os.path.abspath(__file__))+"/output_format.py")'
self.predictionFile += '\\n'
self.predictionFile += ' output_format = importlib.util.module_from_spec(output_format_obj)'
self.predictionFile += '\\n'
self.predictionFile += ' output_format_obj.loader.exec_module(output_format)'
self.predictionFile += '\\n'
self.predictionFile += ' df = json_normalize(jsonData)'
self.predictionFile += '\\n'
self.predictionFile += ' df0 = df.copy()'
self.predictionFile += '\\n'
#if(learner_type != 'TextML'):
self.predictionFile += ' profilerobj = inputprofiler.inputprofiler()'
self.predictionFile += '\\n'
self.predictionFile += ' df = profilerobj.apply_profiler(df)'
self.predictionFile += '\\n'
self.predictionFile += ' selectobj = selector.selector()'
self.predictionFile += '\\n'
self.predictionFile += ' df = selectobj.apply_selector(df)'
self.predictionFile += '\\n'
self.predictionFile += ' output = model.trained_model().predict(df,"")'
self.predictionFile += '\\n'
self.predictionFile += ' outputobj = output_format.output_format()'
self.predictionFile += '\\n'
self.predictionFile += ' output = outputobj.apply_output_format(df0,output)'
#self.predictionFile += '\\n'
#self.predictionFile += ' print(output)'
self.predictionFile += '\\n'
self.predictionFile += ' return output'
self.predictionFile += '\\n'
self.predictionFile += ' except KeyError as e:'
self.predictionFile += '\\n'
self.predictionFile += '            output = {"status":"FAIL","message":str(e).strip(\\'"\\')}'
self.predictionFile += '\\n'
self.predictionFile += ' return json.dumps(output)'
self.predictionFile += '\\n'
def create_prediction_file(self,classname,deploy_path,learner_type,grouperbyjson,rowfilterexpression,model_type,datetimeFeature):
self.predictionFile = 'import warnings'
self.predictionFile += '\\n'
self.predictionFile += 'warnings.filterwarnings("ignore")'
self.predictionFile += '\\n'
self.predictionFile += 'import json'
self.predictionFile += '\\n'
self.predictionFile += 'import os'
self.predictionFile += '\\n'
self.predictionFile += 'import sys'
self.predictionFile += '\\n'
self.predictionFile += 'import pandas as pd'
self.predictionFile += '\\n'
self.predictionFile += 'from pandas import json_normalize'
self.predictionFile += '\\n'
if(learner_type.lower() != 'recommendersystem'): #task 11190
self.predictionFile += 'from script.selector import selector'
self.predictionFile += '\\n'
self.predictionFile += 'from script.inputprofiler import inputprofiler'
self.predictionFile += '\\n'
#self.predictionFile += 'from '+classname+' import '+classname
self.predictionFile += 'from script.trained_model import trained_model'
self.predictionFile += '\\n'
else:
self.predictionFile += 'from script.item_recommendation import collaborative_filter'
self.predictionFile += '\\n'
self.predictionFile += 'from script.output_format import output_format'
self.predictionFile += '\\n'
if (learner_type != 'RecommenderSystem'): #task 11190
self.predictionFile += 'profilerobj = inputprofiler()'
self.predictionFile += '\\n'
self.predictionFile += 'selectobj = selector()'
self.predictionFile += '\\n'
self.predictionFile += 'modelobj = trained_model()'
self.predictionFile += '\\n'
else:
self.predictionFile += 'colabobj = collaborative_filter()'
self.predictionFile += '\\n'
self.predictionFile += 'outputobj = output_format()'
self.predictionFile += '\\n'
self.predictionFile += 'def predict(data):'
self.predictionFile += '\\n'
self.predictionFile += ' try:'
self.predictionFile += '\\n'
self.predictionFile += ' if os.path.splitext(data)[1] == ".tsv":'
self.predictionFile += '\\n'
self.predictionFile += ' df=pd.read_csv(data,encoding=\\'utf-8\\',sep=\\'\\\\t\\',skipinitialspace = True,na_values=[\\'-\\',\\'?\\'])'
self.predictionFile += '\\n'
self.predictionFile += ' elif os.path.splitext(data)[1] == ".csv":'
self.predictionFile += '\\n'
self.predictionFile += ' df=pd.read_csv(data,encoding=\\'utf-8\\',skipinitialspace = True,na_values=[\\'-\\',\\'?\\'])'
self.predictionFile += '\\n'
self.predictionFile += ' elif os.path.splitext(data)[1] == ".dat":'
self.predictionFile += '\\n'
self.predictionFile += ' df=pd.read_csv(data,encoding=\\'utf-8\\',skipinitialspace = True,na_values=[\\'-\\',\\'?\\'])'
self.predictionFile += '\\n'
self.predictionFile += ' else:'
self.predictionFile += '\\n'
self.predictionFile += ' if os.path.splitext(data)[1] == ".json":'
self.predictionFile += '\\n'
self.predictionFile += ' with open(data,\\'r\\',encoding=\\'utf-8\\') as f:'
self.predictionFile += '\\n'
self.predictionFile += ' jsonData = json.load(f)'
self.predictionFile += '\\n'
self.predictionFile += ' else:'
self.predictionFile += '\\n'
self.predictionFile += ' jsonData = json.loads(data)'
self.predictionFile += '\\n'
self.predictionFile += ' df = json_normalize(jsonData)'
self.predictionFile += '\\n'
self.predictionFile += ' df.rename(columns=lambda x: x.strip(), inplace=True)'
self.predictionFile += '\\n'
if str(rowfilterexpression) != '':
self.predictionFile += ' filterexpression = "'+rowfilterexpression+'"'
self.predictionFile += '\\n'
self.predictionFile += ' df = df.query(filterexpression)'
self.predictionFile += '\\n'
#print(grouperbyjson)
if str(grouperbyjson) != '':
datetime = grouperbyjson['datetime']
unit = grouperbyjson['unit']
if unit == '':
self.predictionFile += ' df[\\'date\\'] = pd.to_datetime(df[\\''+datetime+'\\'])'
self.predictionFile += '\\n'
else:
self.predictionFile += ' df[\\'date\\'] = pd.to_datetime(df[\\''+datetime+'\\'],unit=\\''+unit+'\\')'
self.predictionFile += '\\n'
self.predictionFile += ' df = df.reset_index()'
self.predictionFile += '\\n'
self.predictionFile += ' df.set_index(\\'date\\',inplace=True)'
self.predictionFile += '\\n'
self.predictionFile += ' df = df.'+grouperbyjson['groupbystring']
self.predictionFile += '\\n'
self.predictionFile += ' df.columns = df.columns.droplevel(0)'
self.predictionFile += '\\n'
self.predictionFile += ' df = df.reset_index()'
self.predictionFile += '\\n'
self.predictionFile += ' df0 = df.copy()'
self.predictionFile += '\\n'
if(learner_type != 'RecommenderSystem'): #task 11190
if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na':
self.predictionFile += ' df,datetimeFeature = profilerobj.apply_profiler(df)'
self.predictionFile += '\\n'
else:
self.predictionFile += ' df = profilerobj.apply_profiler(df)'
self.predictionFile += '\\n'
self.predictionFile += ' df = selectobj.apply_selector(df)'
self.predictionFile += '\\n'
#self.predictionFile += ' modelobj = '+classname+'()'
self.predictionFile += ' output = modelobj.predict(df,"")'
self.predictionFile += '\\n'
else:
self.predictionFile += ' output = colabobj.predict(df)'
self.predictionFile += '\\n'
if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na':
self.predictionFile += ' output = outputobj.apply_output_format(df0,output,datetimeFeature)'
self.predictionFile += '\\n'
else:
self.predictionFile += ' output = outputobj.apply_output_format(df0,output)'
self.predictionFile += '\\n'
self.predictionFile += ' print("predictions:",output)'
self.predictionFile += '\\n'
self.predictionFile += ' return(output)'
self.predictionFile += '\\n'
self.predictionFile += ' except KeyError as e:'
self.predictionFile += '\\n'
self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\\'"\\')}'
self.predictionFile += '\\n'
self.predictionFile += ' print("predictions:",json.dumps(output))'
self.predictionFile += '\\n'
self.predictionFile += ' return (json.dumps(output))'
self.predictionFile += '\\n'
self.predictionFile += ' except Exception as e:'
self.predictionFile += '\\n'
self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\\'"\\')}'
self.predictionFile += '\\n'
self.predictionFile += ' print("predictions:",json.dumps(output))'
self.predictionFile += '\\n'
self.predictionFile += ' return (json.dumps(output))'
self.predictionFile += '\\n'
self.predictionFile += 'if __name__ == "__main__":'
self.predictionFile += '\\n'
self.predictionFile += ' output = predict(sys.argv[1])'
filename = os.path.join(deploy_path,'aion_predict.py')
f = open(filename, "w")
f.write(str(self.predictionFile))
f.close()
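# Generates aion_opdrift.py for text-classification deployments. The script
# scores the training and current datasets through aion_predict, builds an
# Evidently ClassificationPreset report with the text features mapped, saves
# performance.html under log/ and returns the drift metrics as JSON.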
def create_classification_text_performance_file(self,deploy_path,features,target):
features = ",".join([feature for feature in features])
self.predictionFile = """\\
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import json
import os
import sys
from pandas import json_normalize
# from evidently.dashboard import Dashboard
# from evidently.tabs import ClassificationPerformanceTab
from evidently.pipeline.column_mapping import ColumnMapping
from aion_predict import predict
from evidently.report import Report
from evidently.pipeline.column_mapping import ColumnMapping
from evidently.metric_preset import ClassificationPreset
def odrift(data):
try:
"""
self.predictionFile += ' features = \\''+features+'\\''
self.predictionFile += '\\n'
self.predictionFile += ' target = \\''+target+'\\''
self.predictionFile += '\\n'
self.predictionFile +="""\\
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
production = predict().run(jsonData['currentDataLocation'])
reference = predict().run(jsonData['trainingDataLocation'])
production = json.loads(production)
reference = json.loads(reference)
if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'):
production = production['data']
production = json_normalize(production)
reference = reference['data']
reference = json_normalize(reference)
production['target'] = production[target]
reference['target'] = reference[target]
column_mapping = ColumnMapping()
column_mapping.target = target
column_mapping.prediction = 'prediction'
column_mapping.datetime = None
column_mapping.text_features = features.split(',')
iris_model_performance_dashboard = Report(metrics=[ClassificationPreset()])
iris_model_performance_dashboard.run(reference_data=reference, current_data=production,column_mapping=column_mapping)
report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html')
iris_model_performance_dashboard.save_html(report)
metrics_output = iris_model_performance_dashboard.as_dict()
output = {"status":"SUCCESS","htmlPath":report, 'drift_details':metrics_output['metrics']}
print("drift:",json.dumps(output))
return (json.dumps(output))
except KeyError as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
except Exception as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
if __name__ == "__main__":
output = odrift(sys.argv[1])"""
filename = os.path.join(deploy_path,'aion_opdrift.py')
f = open(filename, "wb")
f.write(str(self.predictionFile).encode('utf8'))
f.close()
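# Same generator for plain (non-text) classification: the features are mapped
# as numerical_features and the Evidently ClassificationPreset report is
# written to log/performance.html.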
def create_classification_performance_file(self,deploy_path,features,target):
features = ",".join([feature for feature in features])
self.predictionFile = """\\
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import json
import os
import sys
from pandas import json_normalize
from evidently.report import Report
from evidently.metric_preset import ClassificationPreset
from evidently.pipeline.column_mapping import ColumnMapping
from aion_predict import predict
def odrift(data):
try:
"""
self.predictionFile += ' features = \\''+features+'\\''
self.predictionFile += '\\n'
self.predictionFile += ' target = \\''+target+'\\''
self.predictionFile += '\\n'
self.predictionFile +="""\\
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
production = predict().run(jsonData['currentDataLocation'])
reference = predict().run(jsonData['trainingDataLocation'])
production = json.loads(production)
reference = json.loads(reference)
if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'):
production = production['data']
production = json_normalize(production)
reference = reference['data']
reference = json_normalize(reference)
production['target'] = production[target]
reference['target'] = reference[target]
column_mapping = ColumnMapping()
column_mapping.target = target
column_mapping.prediction = 'prediction'
column_mapping.datetime = None
column_mapping.numerical_features = features.split(',')
model_performance_dashboard = Report(metrics = [ClassificationPreset()])
model_performance_dashboard.run(reference_data =reference, current_data =production, column_mapping = column_mapping)
report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html')
model_performance_dashboard.save_html(report)
metrics_output = model_performance_dashboard.as_dict()
output = {"status":"SUCCESS","htmlPath":report, 'drift_details':metrics_output['metrics']}
print("drift:",json.dumps(output))
return (json.dumps(output))
else:
output = {"status":"SUCCESS","htmlPath":'NA'}
print("drift:",json.dumps(output))
return (json.dumps(output))
except KeyError as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
except Exception as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
if __name__ == "__main__":
output = odrift(sys.argv[1])"""
filename = os.path.join(deploy_path,'aion_opdrift.py')
f = open(filename, "wb")
f.write(str(self.predictionFile).encode('utf8'))
f.close()
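# Generates aion_service.py, a Flask-RESTful wrapper around the deployed model.
# It always exposes /predict and /predict_file; for classification and
# regression problems it additionally wires /explain, /monitoring (input drift)
# and /performance (output drift), with optional CORS origins taken from the
# -cors command-line argument.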
def create_model_service(self,deploy_path,serviceName,problemType):
filedata = """
from flask import Flask, jsonify, request
from flask_restful import Resource, Api
from aion_predict import predict"""
if problemType.lower() == 'classification' or problemType.lower() == 'regression':
filedata += """
from aion_xai import local_analysis
from aion_ipdrift import drift
from aion_opdrift import odrift"""
filedata += """
import json
import os
import pandas as pd
import io
import argparse
from pathlib import Path
from flask_cors import CORS, cross_origin
app = Flask(__name__)
#cross origin resource from system arguments
parser = argparse.ArgumentParser()
parser.add_argument('-ip', '--ipaddress', help='IP Address')
parser.add_argument('-p', '--port', help='Port Number')
parser.add_argument("-cors", type=str, required=False)
d = vars(parser.parse_args())
modelPath = Path(__file__).parent
try:
with open( (modelPath/'etc')/'display.json', 'r') as f:
disp_data = json.load(f)
is_explainable = not disp_data.get('textFeatures')
except:
disp_data = {}
is_explainable = True
if "cors" in d.keys():
if d["cors"] != '' and d["cors"] != None:
d["cors"] = [s.strip() for s in d["cors"].split(",")]
#cors = CORS(app, resources={r"/AION/*": {"origins": ["http://localhost", "http://localhost:5000"]}})
cors = CORS(app, resources={r"/AION/*": {"origins": d["cors"]}})
api = Api(app)
class predictapi(Resource):
def get(self):
features = disp_data.get('modelFeatures')
if features:
msg=\\"""
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
\\""".format(displaymsg={ x:'Value' for x in features})
else:
displaymsg='Data in JSON Format'
return jsonify(displaymsg)
def post(self):
data = request.get_json()
output = predict().run(json.dumps(data))
return jsonify(json.loads(output))
class predictfileapi(Resource):
def post(self):
if 'file' in request.files:
file = request.files['file']
urlData = file.read()
rawData = pd.read_csv(io.StringIO(urlData.decode('utf-8')))
data = rawData.to_json(orient='records')
output = predict().run(data)
return jsonify(json.loads(output))
else:
displaymsg='File is missing'
return jsonify(displaymsg)
def get(self):
msg=\\"""
RequestType: POST
Body:send file content in body\\"""
return jsonify(msg)
"""
if problemType.lower() == 'classification' or problemType.lower() == 'regression':
filedata += """
class explainapi(Resource):
def get(self):
features = disp_data.get('modelFeatures')
if features:
msg=\\"""
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
\\""".format(displaymsg={ x:'Value' for x in features})
else:
displaymsg='Data in JSON Format'
return jsonify(displaymsg)
def post(self):
data = request.get_json()
if is_explainable:
output = local_analysis(json.dumps(data))
else:
output = json.dumps({"status":"FAIL","data":"explain api is not supported when text features are used for training"})
return jsonify(json.loads(output))
class monitoringapi(Resource):
def get(self):
return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'})
def post(self):
data = request.get_json()
output = drift(json.dumps(data))
return jsonify(json.loads(output))
class performanceapi(Resource):
def get(self):
return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'})
def post(self):
data = request.get_json()
output = odrift(json.dumps(data))
return jsonify(json.loads(output))
"""
filedata += """
api.add_resource(predictapi, '/AION/{serviceName}/predict')""".format(serviceName=serviceName)
filedata += """
api.add_resource(predictfileapi, '/AION/{serviceName}/predict_file')""".format(serviceName=serviceName)
if problemType.lower() == 'classification' or problemType.lower() == 'regression':
filedata += """
api.add_resource(explainapi, '/AION/{serviceName}/explain')
api.add_resource(monitoringapi, '/AION/{serviceName}/monitoring')
api.add_resource(performanceapi, '/AION/{serviceName}/performance')""".format(serviceName=serviceName)
filedata += """
if __name__ == '__main__':
args = parser.parse_args()
app.run(args.ipaddress,port = args.port,debug = True)"""
filename = os.path.join(deploy_path,'aion_service.py')
f = open(filename, "wb")
f.write(str(filedata).encode('utf8'))
f.close()
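# Regression counterpart of the performance-drift generator: the produced
# aion_opdrift.py uses Evidently's RegressionPreset over the numerical features.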
def create_regression_performance_file(self,deploy_path,features,target):
features = ",".join([feature for feature in features])
self.predictionFile = """\\
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import json
import os
import sys
from pandas import json_normalize
from evidently.report import Report
from evidently.metric_preset import RegressionPreset
from evidently.pipeline.column_mapping import ColumnMapping
from aion_predict import predict
def odrift(data):
try:
"""
self.predictionFile += ' features = \\''+features+'\\''
self.predictionFile += '\\n'
self.predictionFile += ' target = \\''+target+'\\''
self.predictionFile += '\\n'
self.predictionFile +="""\\
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
production = predict().run(jsonData['currentDataLocation'])
reference = predict().run(jsonData['trainingDataLocation'])
production = json.loads(production)
reference = json.loads(reference)
if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'):
production = production['data']
production = json_normalize(production)
reference = reference['data']
reference = json_normalize(reference)
production['target'] = production[target]
reference['target'] = reference[target]
column_mapping = ColumnMapping()
column_mapping.target = target
column_mapping.prediction = 'prediction'
column_mapping.datetime = None
column_mapping.numerical_features = features.split(',')
iris_model_performance_dashboard = Report(metrics=[RegressionPreset()])
iris_model_performance_dashboard.run(reference_data = reference, current_data = production, column_mapping = column_mapping)
report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html')
iris_model_performance_dashboard.save_html(report)
metrics_output = iris_model_performance_dashboard.as_dict()
output = {"status":"SUCCESS","htmlPath":report, 'drift_details':metrics_output['metrics']}
print("drift:",json.dumps(output))
return (json.dumps(output))
else:
output = {"status":"SUCCESS","htmlPath":'NA'}
print("drift:",json.dumps(output))
return (json.dumps(output))
except KeyError as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
except Exception as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
if __name__ == "__main__":
output = odrift(sys.argv[1])"""
filename = os.path.join(deploy_path,'aion_opdrift.py')
f = open(filename, "wb")
f.write(str(self.predictionFile).encode('utf8'))
f.close()
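# Variant for regression models trained with text features; the generated
# script builds the same RegressionPreset report from the joined feature list.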
def create_regression_text_performance_file(self,deploy_path,features,target):
features = ",".join([feature for feature in features])
self.predictionFile = """\\
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import json
import os
import sys
from pandas import json_normalize
from aion_predict import predict
from evidently.report import Report
from evidently.pipeline.column_mapping import ColumnMapping
from evidently.metric_preset import RegressionPreset
def odrift(data):
try:
"""
self.predictionFile += ' features = \\''+features+'\\''
self.predictionFile += '\\n'
self.predictionFile += ' target = \\''+target+'\\''
self.predictionFile += '\\n'
self.predictionFile +="""\\
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
production = predict().run(jsonData['currentDataLocation'])
reference = predict().run(jsonData['trainingDataLocation'])
production = json.loads(production)
reference = json.loads(reference)
if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'):
production = production['data']
production = json_normalize(production)
reference = reference['data']
reference = json_normalize(reference)
production['target'] = production[target]
reference['target'] = reference[target]
column_mapping = ColumnMapping()
column_mapping.target = target
column_mapping.prediction = 'prediction'
column_mapping.datetime = None
column_mapping.numerical_features = features.split(',')
iris_model_performance_dashboard = Report(metrics=[RegressionPreset()])
iris_model_performance_dashboard.run(reference_data=reference, current_data=production,column_mapping=column_mapping)
report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html')
iris_model_performance_dashboard.save_html(report)
metrics_output = iris_model_performance_dashboard.as_dict()
output = {"status":"SUCCESS","htmlPath":report, 'drift_details':metrics_output['metrics']}
print("drift:",json.dumps(output))
return (json.dumps(output))
else:
output = {"status":"SUCCESS","htmlPath":'NA'}
print("drift:",json.dumps(output))
return (json.dumps(output))
except KeyError as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
except Exception as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
if __name__ == "__main__":
output = odrift(sys.argv[1])"""
filename = os.path.join(deploy_path,'aion_opdrift.py')
f = open(filename, "wb")
f.write(str(self.predictionFile).encode('utf8'))
f.close()
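# Generates aion_publish_service.py once per use case (the file is not
# overwritten if it already exists). The service embeds a small sqlite_db helper
# that persists production data and call counters in data/data.db, and exposes
# predict, predict_file, metrices and, for classification/regression, explain,
# monitoring and performance endpoints. The deployed model version is read from
# config.json, which is (re)written at the end of this method.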
def create_publish_service(self,datalocation,usecaseid,version,problemType):
filename = os.path.join(datalocation,'aion_publish_service.py')
if not os.path.exists(filename):
filedata = """
import sys
import json
import time
import sqlite3
import argparse
import pandas as pd
import io
from pathlib import Path
from datetime import datetime
filename = Path(__file__).parent/'config.json'
with open (filename, "r") as f:
data = json.loads(f.read())
modelVersion = str(data['version'])
modelPath = Path(__file__).parent/modelVersion
sys.path.append(str(modelPath))
try:
with open( (modelPath/'etc')/'display.json', 'r') as f:
disp_data = json.load(f)
is_explainable = not disp_data.get('textFeatures')
except:
disp_data = {}
is_explainable = True
from flask import Flask, jsonify, request
from flask_restful import Resource, Api
from flask_cors import CORS, cross_origin
from flask import Response
from aion_predict import predict
"""
if problemType.lower() == 'classification' or problemType.lower() == 'regression':
filedata += """
from aion_ipdrift import drift
from aion_opdrift import odrift
if is_explainable:
from aion_xai import local_analysis
"""
filedata += """
dataPath = Path(__file__).parent/'data'
dataPath.mkdir(parents=True, exist_ok=True)
app = Flask(__name__)
#cross origin resource from system arguments
parser = argparse.ArgumentParser()
parser.add_argument('-ip', '--ipaddress', help='IP Address')
parser.add_argument('-p', '--port', help='Port Number')
parser.add_argument("-cors", type=str, required=False)
d = vars(parser.parse_args())
if "cors" in d.keys():
if d["cors"] != '' and d["cors"] != None:
d["cors"] = [s.strip() for s in d["cors"].split(",")]
#cors = CORS(app, resources={r"/AION/*": {"origins": ["http://localhost", "http://localhost:5000"]}})
cors = CORS(app, resources={r"/AION/*": {"origins": d["cors"]}})
api = Api(app)
class sqlite_db():
def __init__(self, location, database_file=None):
if not isinstance(location, Path):
location = Path(location)
if database_file:
self.database_name = database_file
else:
self.database_name = location.stem + '.db'
db_file = str(location/self.database_name)
self.conn = sqlite3.connect(db_file)
self.cursor = self.conn.cursor()
self.tables = []
def table_exists(self, name):
if name in self.tables:
return True
elif name:
query = f"SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';"
listOfTables = self.cursor.execute(query).fetchall()
if len(listOfTables) > 0 :
self.tables.append(name)
return True
return False
def read(self, table_name,condition=''):
if condition == '':
return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn)
else:
return pd.read_sql_query(f"SELECT * FROM {table_name} WHERE {condition}", self.conn)
def create_table(self,name, columns, dtypes):
query = f'CREATE TABLE IF NOT EXISTS {name} ('
for column, data_type in zip(columns, dtypes):
query += f"'{column}' TEXT,"
query = query[:-1]
query += ');'
self.conn.execute(query)
return True
def update(self,table_name,updates,condition):
update_query = f'UPDATE {table_name} SET {updates} WHERE {condition}'
self.cursor.execute(update_query)
self.conn.commit()
return True
def write(self,data, table_name):
if not self.table_exists(table_name):
self.create_table(table_name, data.columns, data.dtypes)
tuple_data = list(data.itertuples(index=False, name=None))
insert_query = f'INSERT INTO {table_name} VALUES('
for i in range(len(data.columns)):
insert_query += '?,'
insert_query = insert_query[:-1] + ')'
self.cursor.executemany(insert_query, tuple_data)
self.conn.commit()
return True
def delete(self, name):
pass
def close(self):
self.conn.close()"""
filedata += """
app = Flask(__name__)
api = Api(app)
class predictapi(Resource):
def get(self):
features = disp_data.get('modelFeatures')
if features:
msg=\\"""
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
\\""".format(displaymsg={ x:'Value' for x in features})
else:
displaymsg='Data in JSON Format'
return jsonify(displaymsg)
def post(self):
sqlite_dbObj = sqlite_db(dataPath,'data.db')
if not sqlite_dbObj.table_exists('metrices'):
data = {'noOfPredictCalls':'0','noOfDriftCalls':'0',"noOfActualCalls":'0',"mid":'0'}
data = pd.DataFrame(data, index=[0])
sqlite_dbObj.create_table('metrices',data.columns, data.dtypes)
data = request.get_json()
output = predict().run(json.dumps(data))
outputobj = json.loads(output)
if outputobj['status'] == 'SUCCESS':
try:
df2 = pd.read_json(json.dumps(outputobj['data']), orient ='records')
if not sqlite_dbObj.table_exists('prodData'):
sqlite_dbObj.create_table('prodData',df2.columns, df2.dtypes)
sqlite_dbObj.write(df2,'prodData')
except:
pass
try:
data = sqlite_dbObj.read('metrices')
#print(data)
if len(data) == 0:
data = [{'mid':'0','noOfPredictCalls':'1','noOfDriftCalls':'0',"noOfActualCalls":'0'}]
data = pd.read_json(json.dumps(data), orient ='records')
sqlite_dbObj.write(data,'metrices')
else:
noofPredictCalls = int(data['noOfPredictCalls'].iloc[0])+1
sqlite_dbObj.update('metrices',"noOfPredictCalls = '"+str(noofPredictCalls)+"'","mid = 0")
except Exception as e:
print(e)
pass
return jsonify(json.loads(output))
class predictfileapi(Resource):
def post(self):
sqlite_dbObj = sqlite_db(dataPath,'data.db')
if not sqlite_dbObj.table_exists('metrices'):
data = {'noOfPredictCalls':'0','noOfDriftCalls':'0',"noOfActualCalls":'0',"mid":'0'}
data = pd.DataFrame(data, index=[0])
sqlite_dbObj.create_table('metrices',data.columns, data.dtypes)
if 'file' in request.files:
file = request.files['file']
urlData = file.read()
rawData = pd.read_csv(io.StringIO(urlData.decode('utf-8')))
data = rawData.to_json(orient='records')
output = predict().run(data)
outputobj = json.loads(output)
if outputobj['status'] == 'SUCCESS':
try:
df2 = pd.read_json(json.dumps(outputobj['data']), orient ='records')
if not sqlite_dbObj.table_exists('prodData'):
sqlite_dbObj.create_table('prodData',df2.columns, df2.dtypes)
sqlite_dbObj.write(df2,'prodData')
except:
pass
try:
data = sqlite_dbObj.read('metrices')
#print(data)
if len(data) == 0:
data = [{'mid':'0','noOfPredictCalls':'1','noOfDriftCalls':'0',"noOfActualCalls":'0'}]
data = pd.read_json(json.dumps(data), orient ='records')
sqlite_dbObj.write(data,'metrices')
else:
noofPredictCalls = int(data['noOfPredictCalls'].iloc[0])+1
sqlite_dbObj.update('metrices',"noOfPredictCalls = '"+str(noofPredictCalls)+"'","mid = 0")
except Exception as e:
print(e)
pass
return jsonify(json.loads(output))
else:
output = {'status':'error','msg':'File is missing'}
return jsonify(output)
"""
if problemType.lower() == 'classification' or problemType.lower() == 'regression':
filedata += """
class explainapi(Resource):
def get(self):
features = disp_data.get('modelFeatures')
if features:
msg=\\"""
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
\\""".format(displaymsg={ x:'Value' for x in features})
else:
displaymsg='Data in JSON Format'
return jsonify(displaymsg)
def post(self):
data = request.get_json()
if is_explainable:
output = local_analysis(json.dumps(data))
else:
output = json.dumps({"status":"FAIL","data":"explain api is not supported when text features are used for training"})
return jsonify(json.loads(output))
class monitoringapi(Resource):
def get(self):
return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'})
def post(self):
sqlite_dbObj = sqlite_db(dataPath,'data.db')
if not sqlite_dbObj.table_exists('monitoring'):
data = {'status':'No Drift','Msg':'No Input Drift Found','RecordTime':'Time','version':'1'}
data = pd.DataFrame(data, index=[0])
sqlite_dbObj.create_table('monitoring',data.columns, data.dtypes)
trainingDataPath = (modelPath/'data')/'preprocesseddata.csv.gz'
if not sqlite_dbObj.table_exists('prodData'):
return jsonify({'status':'Error','msg':'Prod data not available'})
data = sqlite_dbObj.read('prodData')
filetimestamp = str(int(time.time()))
dataFile = dataPath/('AION_' + filetimestamp+'.csv')
data.to_csv(dataFile, index=False)
data = request.get_json()
data={'trainingDataLocation':trainingDataPath,'currentDataLocation':dataFile}
output = drift(json.dumps(data))
outputData = json.loads(output)
status = outputData['status']
if status == 'SUCCESS':
Msg = str(outputData['data'])
else:
Msg = 'Error during drift analysis'
now = datetime.now() # current date and time
date_time = now.strftime("%m/%d/%Y, %H:%M:%S")
data = {'status':status,'Msg':Msg,'RecordTime':date_time,'version':modelVersion}
data = pd.DataFrame(data, index=[0])
sqlite_dbObj.write(data,'monitoring')
return jsonify(json.loads(output))"""
filedata += """
class matricesapi(Resource):
def get(self):
sqlite_dbObj = sqlite_db(dataPath,'data.db')
if sqlite_dbObj.table_exists('metrices'):
df1 = sqlite_dbObj.read('metrices')
else:
df1 = pd.DataFrame()
#print(df1)
if sqlite_dbObj.table_exists('monitoring'):
df2 = sqlite_dbObj.read('monitoring')
else:
df2 = pd.DataFrame()
msg = {'Deployed Version':str(modelVersion)}
if df1.shape[0] > 0:
msg.update({'noOfPredictCalls':str(df1['noOfPredictCalls'].iloc[0])})
else:
msg.update({'noOfPredictCalls':'0'})
driftDetails = []
for idx in reversed(df2.index):
driftd = {'version':str(df2.version[idx]),'status':str(df2.status[idx]),'recordTime':str(df2.RecordTime[idx]),'msg':str(df2.Msg[idx])}
driftDetails.append(driftd)
msg.update({'driftDetails':driftDetails})
return jsonify(msg)
class performanceapi(Resource):
def get(self):
return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'})
def post(self):
sqlite_dbObj = sqlite_db(dataPath,'data.db')
if not sqlite_dbObj.table_exists('monitoring'):
data = {'status':'No Drift','Msg':'No Input Drift Found','RecordTime':'Time','version':'1'}
data = pd.DataFrame(data, index=[0])
sqlite_dbObj.create_table('monitoring',data.columns, data.dtypes)
trainingDataPath = (modelPath/'data')/'preprocesseddata.csv.gz'
if not sqlite_dbObj.table_exists('prodData'):
return jsonify({'status':'Error','msg':'Prod data not available'})
data = sqlite_dbObj.read('prodData')
filetimestamp = str(int(time.time()))
dataFile = dataPath/('AION_' + filetimestamp+'.csv')
data.to_csv(dataFile, index=False)
data = request.get_json()
data={'trainingDataLocation':trainingDataPath,'currentDataLocation':dataFile}
output = odrift(json.dumps(data))
return jsonify(json.loads(output))
"""
filedata += """
api.add_resource(predictapi, '/AION/{serviceName}/predict')
api.add_resource(predictfileapi, '/AION/{serviceName}/predict_file')
api.add_resource(matricesapi, '/AION/{serviceName}/metrices')""".format(serviceName=usecaseid)
if problemType.lower() == 'classification' or problemType.lower() == 'regression':
filedata += """
api.add_resource(explainapi, '/AION/{serviceName}/explain')
api.add_resource(monitoringapi, '/AION/{serviceName}/monitoring')
api.add_resource(performanceapi, '/AION/{serviceName}/performance')
""".format(serviceName=usecaseid)
filedata += """
if __name__ == '__main__':
args = parser.parse_args()
app.run(args.ipaddress,port = args.port,debug = True)"""
f = open(filename, "wb")
f.write(str(filedata).encode('utf8'))
f.close()
data = {'version':version}
filename = os.path.join(datalocation,'config.json')
with open(filename, "w") as outfile:
json.dump(data, outfile)
outfile.close()
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os,sys
import platform
import json
import shutil
import logging
from pathlib import Path
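# Writes script/selector.py for the deployment package. If a PCA pickle is
# supplied, the generated apply_selector() first projects the dataframe through
# it; otherwise it subsets the dataframe to the selected features. A
# featureslist.py with the same feature metadata as create_feature_list() is
# written alongside it.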
def create_selector_file(self,deploy_path,features,pcaModel_pickle_file,bpca_features,apca_features,textFeatures,nonNumericFeatures,numericalFeatures,profiler,targetFeature, model_type,model,config=None):
self.selectorfile += 'import pandas as pd'
self.selectorfile += '\\n'
self.selectorfile += 'import joblib'
self.selectorfile += '\\n'
self.selectorfile += 'import os'
self.selectorfile += '\\n'
self.selectorfile += 'import numpy as np'
self.selectorfile += '\\n'
self.selectorfile += 'class selector(object):'
self.selectorfile += '\\n'
self.selectorfile += ' def apply_selector(self,df):'
self.selectorfile += '\\n'
if pcaModel_pickle_file != '':
self.selectorfile += " pcaModel = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','"+pcaModel_pickle_file+"'))"
self.selectorfile += '\\n'
self.selectorfile += ' bpca_features = '+str(bpca_features)
self.selectorfile += '\\n'
self.selectorfile += ' apca_features = '+str(apca_features)
self.selectorfile += '\\n'
self.selectorfile += ' df = pcaModel.transform(df[bpca_features])'
self.selectorfile += '\\n'
self.selectorfile += ' df = pd.DataFrame(df,columns=apca_features)'
self.selectorfile += '\\n'
if(len(features) != 0) and model_type != 'BM25':
if model_type.lower()!='anomaly_detection' and model.lower() != 'autoencoder':
self.selectorfile += ' df = df['+str(features)+']'
self.selectorfile += '\\n'
self.selectorfile += ' return(df)'
filename = os.path.join(deploy_path,'script','selector.py')
f = open(filename, "wb")
self.log.info('-------> Feature Selector File Location :'+filename)
f.write(str(self.selectorfile).encode('utf8'))
f.close()
featurefile = 'import json'
featurefile +='\\n'
featurefile += 'def getfeatures():'
featurefile +='\\n'
featurefile +=' try:'
featurefile +='\\n'
featurelist = []
if 'profiler' in config:
if 'input_features_type' in config['profiler']:
inputfeatures = config['profiler']['input_features_type']
for x in inputfeatures:
featurelt={}
featurelt['feature'] = x
print(x,inputfeatures[x])
if x == targetFeature:
featurelt['Type'] = 'Target'
else:
if inputfeatures[x] in ['int','int64','float','float64']:
featurelt['Type'] = 'Numeric'
elif inputfeatures[x] == 'object':
featurelt['Type'] = 'Text'
elif inputfeatures[x] == 'category':
featurelt['Type'] = 'Category'
else:
featurelt['Type'] = 'Unknown'
featurelist.append(featurelt)
featurefile +=' features = '+str(featurelist)
featurefile +='\\n'
featurefile +=' outputjson = {"status":"SUCCESS","features":features}'
featurefile +='\\n'
featurefile +=' output = json.dumps(outputjson)'
featurefile +='\\n'
featurefile +=' print("Features:",output)'
featurefile +='\\n'
featurefile +=' return(output)'
featurefile +='\\n'
featurefile +=' except Exception as e:'
featurefile +='\\n'
featurefile +=' output = {"status":"FAIL","message":str(e).strip(\\'"\\')}'
featurefile +='\\n'
featurefile +=' print("Features:",json.dumps(output))'
featurefile +='\\n'
featurefile +=' return (json.dumps(output))'
featurefile +='\\n'
featurefile +='if __name__ == "__main__":'
featurefile +='\\n'
featurefile +=' output = getfeatures()'
filename = os.path.join(deploy_path,'featureslist.py')
f = open(filename, "wb")
f.write(str(featurefile).encode('utf8'))
f.close()
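# Emits the __init__ body of the generated trained_model class. The loading
# strategy depends on the learner: Keras models are loaded with load_model
# (adding custom metric objects where the score parameter requires them),
# autoencoders and TS deep models via tf.keras, image classifiers rebuild the
# DenseNet/Inception head before loading weights, similarity search loads a
# chromadb collection or the training CSV, and everything else falls back to
# joblib.load.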
def create_init_function_for_classification(self,modelfile,classes,learner_type,scoreParam,loss_matrix,optimizer,preprocessing_pipe,modelName,model_type,imageconfig):
self.modelfile += ' def __init__(self):'
self.modelfile += '\\n'
if (learner_type == 'ML' and model_type.lower()=='anomaly_detection' and modelName.lower()=="autoencoder"):
modelfile=modelfile.replace('.sav','')
self.modelfile+=" self.model = tf.keras.models.load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\\n'
elif(learner_type == 'TextDL' or learner_type == 'DL'):
if modelName.lower() == 'googlemodelsearch':
self.modelfile += ' import autokeras as ak'
self.modelfile += '\\n'
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','modelsearch_rootdir','saved_model_onnx.onnx'))"
self.modelfile += '\\n'
else:
if scoreParam == 'recall':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'recall': recall_m},compile=False)"
self.modelfile += '\\n'
self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[recall_m])'
self.modelfile += '\\n'
elif scoreParam == 'precision':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'precision': precision_m},compile=False)"
self.modelfile += '\\n'
self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[precision_m])'
self.modelfile += '\\n'
elif scoreParam == 'roc_auc':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),compile=False)"
self.modelfile += '\\n'
self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[tf.keras.metrics.AUC()])'
self.modelfile += '\\n'
elif scoreParam == 'f1_score':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'f1_score': f1_m},compile=False)"
self.modelfile += '\\n'
self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[f1_m])'
self.modelfile += '\\n'
elif scoreParam == 'r2':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'r2': r_square},compile=False)"
self.modelfile += '\\n'
self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[r_square])'
self.modelfile += '\\n'
elif scoreParam == 'rmse':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'rmse': rmse_m},compile=False)"
self.modelfile += '\\n'
self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[rmse_m])'
self.modelfile += '\\n'
elif scoreParam == 'mse':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\\n'
elif scoreParam == 'mae':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\\n'
elif scoreParam == 'accuracy':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\\n'
else:
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\\n'
elif(learner_type == 'Text Similarity'):
self.modelfile += " self.preprocessing = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','"+preprocessing_pipe+"'))"
self.modelfile += '\\n'
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'), custom_objects={'cosine_distance': cosine_distance, 'cos_dist_output_shape': cos_dist_output_shape})"
self.modelfile += '\\n'
elif(learner_type in ['similarityIdentification','contextualSearch']):
if scoreParam == 'VectorDB Cosine':
vectorfiledbname = 'trainingdataVecDB'
self.modelfile += f"\\
\\n persist_directory = os.path.join(os.path.dirname(__file__),'..','data')\\
\\n client = chromadb.PersistentClient(path=persist_directory)\\
\\n self.collection_name = '{vectorfiledbname}'\\
\\n self.collection = client.get_collection(self.collection_name)\\n"
else:
self.modelfile += " self.train_input = pd.read_csv(os.path.join(os.path.dirname(__file__),'..','data','trainingdata.csv'))\\n\\n"
elif(learner_type == 'ImageClassification'):
self.modelfile += ' self.config='+str(imageconfig)
self.modelfile += '\\n'
if(modelName.lower() == 'densenet'):
self.modelfile += ' baseModel = tf.keras.applications.DenseNet121(weights="imagenet", include_top=False, input_tensor=Input(shape=(self.config[\\'img_width\\'],self.config[\\'img_height\\'],self.config[\\'img_channel\\'])))'
else:
self.modelfile += ' baseModel = tensorflow.keras.applications.InceptionV3(weights="imagenet", include_top=False, input_tensor=Input(shape=(self.config[\\'img_width\\'],self.config[\\'img_height\\'],self.config[\\'img_channel\\'])))'
self.modelfile += '\\n'
self.modelfile += ' headModel = baseModel.output'
self.modelfile += '\\n'
self.modelfile += ' headModel = Flatten(name="flatten")(headModel)'
self.modelfile += '\\n'
self.modelfile += ' headModel = Dense(1024, activation=\\'relu\\')(headModel)'
self.modelfile += '\\n'
self.modelfile += ' headModel = Dropout(0.5)(headModel)'
self.modelfile += '\\n'
self.modelfile += ' headModel = Dense(2, activation=\\'sigmoid\\')(headModel)'
self.modelfile += '\\n'
self.modelfile += ' headModel = self.model = Model(inputs=baseModel.input, outputs=headModel)'
self.modelfile += '\\n'
self.modelfile += ' opt = Adam(lr=self.config[\\'lr\\'])'
self.modelfile += '\\n'
self.modelfile += ' self.model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])'
self.modelfile += '\\n'
self.modelfile += " self.model.load_weights(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\\n'
elif(learner_type == 'objectDetection'):
self.modelfile += " self.MODEL_LOCATION = os.path.join(os.path.dirname(__file__),'..','model')\\n"
self.modelfile += ' PATH_TO_CFG = self.MODEL_LOCATION+"/export/pipeline.config"\\n'
self.modelfile += ' PATH_TO_CKPT = self.MODEL_LOCATION+"/export/checkpoint/"\\n'
self.modelfile += ' PATH_TO_LABELS = self.MODEL_LOCATION+"/export/label_map.pbtxt"\\n'
self.modelfile += ' configs = config_util.get_configs_from_pipeline_file(PATH_TO_CFG)\\n'
self.modelfile += ' self.detection_model = model_builder.build(model_config=configs["model"], is_training=False)\\n'
self.modelfile += ' ckpt = tf.compat.v2.train.Checkpoint(model=self.detection_model)\\n'
self.modelfile += ' ckpt.restore(os.path.join(PATH_TO_CKPT, "ckpt-0")).expect_partial()\\n'
self.modelfile += ' self.category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS,\\
use_display_name=True)\\n'
elif learner_type == 'TS' and (modelName.lower() == 'lstm' or modelName.lower() == 'mlp'):
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\\n'
elif modelName.lower() == 'neural architecture search':
self.modelfile += ' import autokeras as ak'
self.modelfile += '\\n'
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects=ak.CUSTOM_OBJECTS)"
self.modelfile += '\\n'
else:
self.modelfile += " self.model = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\\n'
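# Emits the predict() method of the generated trained_model class, specialised
# per learner type: anomaly-detection autoencoders compute reconstruction-loss
# thresholds, recommender systems score (uid, iid) pairs, similarity search
# ranks documents via BM25, a vector DB or cosine similarity, and text
# similarity models run the saved preprocessing pipeline before predicting.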
def create_predict(self,learner_type,method,model,model_type,threshold,firstDocFeature,secondDocFeature,padding_length,optimizationmethod,sessonal_freq,additional_regressors,feature,modelFeatures,indexFeature,lag_order,scalertransformationFile,datetimeFeature,scoreParam=None):
self.modelfile += ' def predict(self,X,features_names):'
self.modelfile += '\\n'
if (learner_type == 'ML' and model_type.lower()=='anomaly_detection' and model.lower()=="autoencoder"):
self.modelfile += f" X=X[{feature}]\\n"
self.modelfile += f" X = np.asarray(X).astype('float32')\\n"
self.modelfile += f" reconstructed = self.model.predict(X)\\n"
self.modelfile += f" predict_ |
loss = tf.keras.losses.mae(reconstructed,X)\\n"
self.modelfile += ' max_threshold = np.mean(predict_loss) + 2*np.std(predict_loss)\\n'
self.modelfile += ' min_threshold = np.mean(predict_loss) - 2*np.std(predict_loss)\\n'
self.modelfile += ' prediction_df = pd.DataFrame()\\n'
self.modelfile += ' prediction_df["loss"] = predict_loss\\n'
self.modelfile += ' prediction_df["max_threshold"] = max_threshold\\n'
self.modelfile += ' prediction_df["min_threshold"] = min_threshold\\n'
self.modelfile += ' prediction_df["anomaly"] = np.where((prediction_df["loss"] > prediction_df["max_threshold"]) | (prediction_df["loss"] <= prediction_df["min_threshold"]), True, False)\\n'
self.modelfile += ' return prediction_df\\n'
elif(learner_type == 'RecommenderSystem'):
self.modelfile += ' predictions = []'
self.modelfile += '\\n'
self.modelfile += ' for index,row in X.iterrows():'
self.modelfile += '\\n'
self.modelfile += ' score = self.model.predict(int(row["uid"]),int(row["iid"]))'
self.modelfile += '\\n'
self.modelfile += ' predictions.append(score.est)'
self.modelfile += '\\n'
self.modelfile += ' return predictions'
elif(learner_type in ['similarityIdentification','contextualSearch']):
tfeatures = list(modelFeatures.split(","))
if indexFeature != '' and indexFeature != 'NA':
ifeatures = indexFeature.split(",")
for ifes in ifeatures:
if ifes not in tfeatures:
tfeatures.append(ifes)
if model_type == 'BM25':
self.modelfile += f"\\n\\
tokenized_corpus =[doc.split(' ') for doc in self.train_input.tokenize]\\n\\
bm25 = BM25Okapi(tokenized_corpus)\\n\\
tokenized_query = [doc.split(' ') for doc in X.tokenize]\\n\\
logcnt = 5\\n\\
output = []\\n\\
for query in tokenized_query:\\n\\
doc_scores = bm25.get_scores(query)\\n\\
related_docs_indices = np.argsort(doc_scores)[::-1][:logcnt]\\n\\
x = self.train_input[{tfeatures}].loc[self.train_input.index[related_docs_indices]]\\n\\
x['Score'] = doc_scores[related_docs_indices]\\n\\
x['Score'] = round(x['Score'],2).astype(str)+'%'\\n\\
output.append(x)\\n\\
return output\\n"
elif scoreParam == 'VectorDB Cosine':
featuresVecDB = modelFeatures.split(",")
self.modelfile += ' logcnt = 5\\n'
self.modelfile += f" columns = {featuresVecDB}\\n"
self.modelfile += f"\\
\\n output = []\\
\\n for rowindex, row in X.iterrows():\\
\\n queryembedding = X.iloc[rowindex:rowindex+1].to_numpy()\\
\\n results = self.collection.query(\\
\\n query_embeddings=queryembedding.tolist(),\\
\\n n_results=logcnt\\
\\n )\\
\\n x = pd.DataFrame(columns=columns)\\
\\n for i in range(0, len(results['ids'][0])):\\
\\n documentAry = results['documents'][0][i]\\
\\n documentAry = documentAry.split(' ~&~ ')\\
\\n for j in range(0, len(documentAry)):\\
\\n x.at[i,columns[j]] = documentAry[j]\\
\\n x.at[i,'Score'] = results['distances'][0][i]\\
\\n output.append(x)\\
\\n return output"
else:
self.modelfile += ' columns = self.train_input.columns.tolist()\\n'
self.modelfile += ' logcnt = 5\\n'
self.modelfile += f" train_input = self.train_input[{tfeatures}]\\n"
for tf in tfeatures:
self.modelfile += f" columns.remove('{tf}')\\n"
self.modelfile += f"\\
\\n results = cosine_similarity(self.train_input[columns],X)\\
\\n output = []\\
\\n for i in range(results.shape[1]):\\
\\n related_docs_indices = results[:,i].argsort(axis=0)[:-(int(logcnt) + 1):-1]\\
\\n x=self.train_input[{tfeatures}].loc[self.train_input.index[related_docs_indices]]\\
\\n scores = []\\
\\n for j in range(0,logcnt):\\
\\n scores.append(str(round((results[related_docs_indices][j][i])*100))+'%')\\
\\n x['Score'] = scores\\
\\n output.append(x)\\
\\n return output"
elif(learner_type == 'Text Similarity'):
self.modelfile += ' X["'+firstDocFeature+'"] = X["'+firstDocFeature+'"].astype(str)'
self.modelfile += '\\n'
self.modelfile += ' X["'+secondDocFeature+'"] = X["'+secondDocFeature+'"].astype(str)'
self.modelfile += '\\n'
self.modelfile += ' test_sentence1 = self.preprocessing.texts_to_sequences(X["'+firstDocFeature+'"].values)'
self.modelfile += '\\n'
self.modelfile += ' test_sentence2 = self.preprocessing.texts_to_sequences(X["'+secondDocFeature+'"].values)'
self.modelfile += '\\n'
self.modelfile += ' test_sentence1 = pad_sequences(test_sentence1, maxlen='+str(padding_length)+', padding=\\'post\\')'
self.modelfile += '\\n'
self.modelfile += ' test_sentence2 = pad_sequences(test_sentence2, maxlen='+str(padding_length)+', padding=\\'post\\')'
self.modelfile += '\\n'
self.modelfile += ' prediction = self.model.predict([test_sentence1, test_sentence2 ])'
self.modelfile += '\\n'
self.modelfile += ' return(prediction)'
self.modelfile += '\\n'
elif(learner_type == 'ImageClassification'):
self.modelfile += ' predictions = []'
self.modelfile += '\\n'
self.modelfile += ' for index, row in X.iterrows(): '
self.modelfile += '\\n'
self.modelfile += ' img = cv2.imread(row[\\'imagepath\\'])'
self.modelfile += '\\n'
self.modelfile += ' img = cv2.resize(img, (self.config[\\'img_width\\'],self.config[\\'img_height\\']))'
self.modelfile += '\\n'
self.modelfile += ' img = image.img_to_array(img)'
self.modelfile += '\\n'
self.modelfile += ' img = np.expand_dims(img, axis=0)'
self.modelfile += '\\n'
self.modelfile += ' img = img/255'
self.modelfile += '\\n'
self.modelfile += ' prediction = self.model.predict(img)'
self.modelfile += '\\n'
self.modelfile += ' prediction = np.argmax(prediction,axis=1)'
self.modelfile += '\\n'
self.modelfile += ' predictions.append(prediction[0])'
self.modelfile += '\\n'
self.modelfile += ' return(predictions)'
self.modelfile += '\\n'
elif(learner_type == 'objectDetection'):
self.modelfile += ' @tf.function\\n'
self.modelfile += ' def detect_fn(image):\\n'
self.modelfile += ' image, shapes = self.detection_model.preprocess(image)\\n'
self.modelfile += ' prediction_dict = self.detection_model.predict(image, shapes)\\n'
self.modelfile += ' detections = self.detection_model.postprocess(prediction_dict, shapes)\\n'
self.modelfile += ' return detections\\n'
self.modelfile += ' def load_image_into_numpy_array(path):\\n'
self.modelfile += ' return np.array(Image.open(path))\\n'
self.modelfile += ' imageLocation = []\\n'
self.modelfile += ' for i, row in X.iterrows():\\n'
self.modelfile += ' if ("confidance" in row) and row["confidance"] <= 1.0:\\n'
self.modelfile += ' confidance = row["confidance"]\\n'
self.modelfile += ' else:\\n'
self.modelfile += ' confidance = 0.8\\n'
self.modelfile += ' imageName = str(Path(row["imagepath"]).stem)+"_output"+str(Path(row["imagepath"]).suffix)\\n'
self.modelfile += ' image_np = load_image_into_numpy_array(row["imagepath"])\\n'
self.modelfile += ' input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)\\n'
self.modelfile += ' detections = detect_fn(input_tensor)\\n'
self.modelfile += ' num_detections = int(detections.pop("num_detections"))\\n'
self.modelfile += ' detections = {key: value[0, :num_detections].numpy()\\n\\
for key, value in detections.items()}\\n'
self.modelfile += ' detections["num_detections"] = num_detections\\n'
self.modelfile += ' detections["detection_classes"] = detections["detection_classes"].astype(np.int64)\\n'
self.modelfile += ' label_id_offset = 1\\n'
self.modelfile += ' image_np_with_detections = image_np.copy()\\n'
self.modelfile += ' viz_utils.visualize_boxes_and_labels_on_image_array(\\n\\
image_np_with_detections,\\n\\
detections["detection_boxes"],\\n\\
detections["detection_classes"]+label_id_offset,\\n\\
detections["detection_scores"],\\n\\
self.category_index,\\n\\
use_normalized_coordinates=True,\\n\\
max_boxes_to_draw=200,\\n\\
min_score_thresh=confidance,\\n\\
agnostic_mode=False)\\n'
self.modelfile += ' plt.figure()\\n'
self.modelfile += ' plt.imsave(os.path.join(self.MODEL_LOCATION,imageName), image_np_with_detections)\\n'
self.modelfile += ' imageLocation.append(os.path.join(self.MODEL_LOCATION,imageName))\\n'
self.modelfile += ' plt.show()\\n'
self.modelfile += ' return imageLocation\\n'
else:
if(learner_type == 'DL' and model != 'Neural Network'):
self.modelfile += ' X = np.expand_dims(X, axis=2)'
self.modelfile += '\\n'
if(learner_type == 'TextDL'):
self.modelfile += ' return pd.DataFrame(np.argmax(self.model.predict(X),axis=1))'
self.modelfile += '\\n'
elif(learner_type == 'TextML'):
self.modelfile += ' return pd.DataFrame(self.model.predict_proba(X),columns=self.model.classes_)'
self.modelfile += '\\n'
elif(learner_type == 'DL' and model_type == 'Classification'):
self.modelfile += ' X = X.astype(np.float32)'
self.modelfile += '\\n'
self.modelfile += ' return pd.DataFrame(np.argmax(self.model.predict(X),axis=1))'
self.modelfile += '\\n'
else:
if(model_type == 'Classification' or model_type == 'TLClassification'):
if model == 'Neural Architecture Search':
self.modelfile += ' X = X.astype(np.float32)'
self.modelfile += '\\n'
self.modelfile += ' return pd.DataFrame(self.model.predict(X))'
self.modelfile += '\\n'
else:
if optimizationmethod == 'genetic':
self.modelfile += '\\n'
self.modelfile += ' try:'
self.modelfile += '\\n'
self.modelfile += ' return pd.DataFrame(self.model.predict_proba(X))'
self.modelfile += '\\n'
self.modelfile += ' except:'
self.modelfile += '\\n'
self.modelfile += ' return pd.DataFrame(self.model.predict(X))'
else:
self.modelfile += ' X = X.astype(np.float32)'
self.modelfile += '\\n'
if model.lower() == 'deep q network' or model.lower() == 'dueling deep q network':
self.modelfile += ' q, _ = self.model(np.array(X), step_type=constant([time_step.StepType.FIRST] * np.array(X).shape[0]), training=False)'
self.modelfile += '\\n'
self.modelfile += ' return pd.DataFrame(q.numpy())'
else:
self.modelfile += ' return pd.DataFrame(self.model.predict_proba(X), columns=self.model.classes_)'
self.modelfile += '\\n'
elif model_type == 'Regression' and model == 'NAS':
self.modelfile += \\
"""
X = X.astype(np.float32)
return self.model.predict(X)
"""
elif(learner_type == 'TS'):
                    if model.lower() == 'fbprophet':
self.modelfile += ' sessonal_freq="'+str(sessonal_freq)+'"'
self.modelfile += '\\n'
self.modelfile += ' ts_prophet_future = self.model.make_future_dataframe(periods=in |
case_name'],self.params['usecase_ver'], self.name)
def create_idrift(self):
pass
def create_odrift(self):
pass
def create_utils_folder(self):
common.create_util_folder(self.deploy_path)
"""
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from importlib.metadata import version
import sys
class importModule():
def __init__(self):
self.importModule = {}
self.stdlibModule = []
self.localModule = {}
def addLocalModule(self,module, mod_from=None, mod_as=None):
if module == '*':
if module not in self.localModule.keys():
self.localModule[module]= [mod_from]
else:
self.localModule[module].append(mod_from)
elif module not in self.localModule.keys():
self.localModule[module] = {'from':mod_from, 'as':mod_as}
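    # Registers a module import for the generated script. Repeated registrations of
    # the same module with a different alias or source are kept as a list so that
    # getCode() can emit every variant; standard-library modules are tracked separately.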
def addModule(self, module, mod_from=None, mod_as=None):
if module not in self.importModule.keys():
self.importModule[module] = {'from':mod_from, 'as':mod_as}
if module in sys.stdlib_module_names:
self.stdlibModule.append(module)
elif isinstance(self.importModule[module], list):
if mod_as not in [x['as'] for x in self.importModule[module]]:
self.importModule[module].append({'from':mod_from, 'as':mod_as})
elif mod_as not in [x['from'] for x in self.importModule[module]]:
self.importModule[module].append({'from':mod_from, 'as':mod_as})
elif mod_as != self.importModule[module]['as']:
as_list = [self.importModule[module]]
as_list.append({'from':mod_from, 'as':mod_as})
self.importModule[module] = as_list
elif mod_from != self.importModule[module]['from']:
as_list = [self.importModule[module]]
as_list.append({'from':mod_from, 'as':mod_as})
self.importModule[module] = as_list
def getModules(self):
return (self.importModule, self.stdlibModule)
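    # Builds a pip requirements string for every registered import (optionally merged
    # with extra importModule instances). Top-level import names are mapped to their
    # distribution names via modules_alias, local modules resolve to a bundled wheel
    # path, and version lookup failures are ignored for stdlib modules and re-raised otherwise.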
def getBaseModule(self, extra_importers=[]):
modules_alias = { 'sklearn':'scikit-learn',
'genetic_selection':'sklearn-genetic',
'google': 'google-cloud-storage',
'azure':'azure-storage-file-datalake'}
local_modules = {'AIX':'/app/AIX-0.1-py3-none-any.whl'}
modules = []
require = ""
if extra_importers:
extra_importers = [importer.importModule for importer in extra_importers if isinstance(importer, importModule)]
importers_module = [self.importModule] + extra_importers
for importer_module in importers_module:
for k,v in importer_module.items():
if v['from']:
mod = v['from'].split('.')[0]
else:
mod = k
if mod in modules_alias.keys():
mod = modules_alias[mod]
modules.append(mod)
modules = list(set(modules))
for mod in modules:
try:
if mod in local_modules.keys():
require += f"{local_modules[mod]}\\n"
else:
require += f"{mod}=={version(mod)}\\n"
except :
if mod not in sys.stdlib_module_names:
raise
return require
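    # Renders the collected imports as source text, grouped into standard library,
    # third-party and local module sections.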
def getCode(self):
def to_string(k, v):
mod = ''
if v['from']:
mod += 'from {} '.format(v['from'])
mod += 'import {}'.format(k)
if v['as']:
mod += ' as {} '.format(v['as'])
return mod
modules = ""
local_modules = ""
std_lib_modules = ""
third_party_modules = ""
for k,v in self.importModule.items():
if k in self.stdlibModule:
std_lib_modules = std_lib_modules + '\\n' + to_string(k, v)
elif isinstance(v, dict):
third_party_modules = third_party_modules + '\\n' + to_string(k, v)
elif isinstance(v, list):
for alias in v:
third_party_modules = third_party_modules + '\\n' + to_string(k, alias)
for k,v in self.localModule.items():
if k != '*':
local_modules = local_modules + '\\n' + to_string(k, v)
else:
for mod_from in v:
local_modules = local_modules + '\\n' + f'from {mod_from} import {k}'
if std_lib_modules:
modules = modules + "\\n#Standard Library modules" + std_lib_modules
if third_party_modules:
modules = modules + "\\n\\n#Third Party modules" + third_party_modules
if local_modules:
modules = modules + "\\n\\n#local modules" + local_modules + '\\n'
return modules
def copyCode(self, importer):
self.importModule, self.stdlibModule = importer.getModules()
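# Illustrative usage of importModule (sketch only; the module names below are arbitrary):
#   importer = importModule()
#   importer.addModule('json')                       # stdlib, grouped separately
#   importer.addModule('pandas', mod_as='pd')
#   importer.addModule('Path', mod_from='pathlib')
#   print(importer.getCode())         # grouped import statements
#   print(importer.getBaseModule())   # pip requirements with pinned versions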
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os,sys
import platform
import json
import shutil
import logging
from pathlib import Path
from prediction_package import production
from prediction_package import prediction_transformation as cs
class DeploymentManager:
def __init__(self):
self.requirementfile=''
self.modelfile=''
self.s2i_environmentfile=''
self.selectorfile=''
self.profilerfile=''
self.readmepackagename=''
self.pythonpackage=''
self.log = logging.getLogger('eion')
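    # Appends the import statements (and matching requirement pins) that the generated
    # prediction script needs for the given learner type, model and score metric,
    # including the Keras metric helpers (recall_m/precision_m/f1_m/rmse_m/r_square).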
def include_import_file(self,learner_type,method,scoreParam,model_type,model):
if((learner_type == 'DL') or (learner_type == 'TextDL')):
self.modelfile += 'from tensorflow.keras.models import load_model'
self.modelfile += '\\n'
self.modelfile += 'from tensorflow.keras import backend as K'
self.modelfile += '\\n'
self.modelfile += 'import tensorflow as tf'
self.modelfile += '\\n'
if (learner_type == 'ML' and model_type.lower()=='anomaly_detection' and model.lower() == 'autoencoder'):
self.modelfile += 'import joblib'
self.modelfile += '\\n'
self.modelfile += 'import os'
self.modelfile += '\\n'
self.modelfile += 'import pandas as pd'
self.modelfile += '\\n'
self.modelfile += 'import numpy as np'
self.modelfile += '\\n'
self.modelfile += 'from pathlib import Path'
self.modelfile += '\\n'
self.modelfile += 'import tensorflow as tf'
self.modelfile += '\\n'
self.modelfile += 'from keras.models import load_model'
self.modelfile += '\\n'
self.modelfile += 'import warnings'
self.modelfile += '\\n'
self.modelfile += 'from sklearn.preprocessing import StandardScaler'
self.modelfile += '\\n'
self.modelfile += 'warnings.filterwarnings("ignore")'
self.modelfile += '\\n'
if(learner_type == 'ImageClassification'):
self.modelfile += 'import os'
self.modelfile += '\\n'
self.modelfile += 'from tensorflow.keras.models import Sequential'
self.modelfile += '\\n'
self.modelfile += 'from tensorflow.keras.layers import Dense, Dropout, Flatten'
self.modelfile += '\\n'
self.modelfile += 'from tensorflow.keras.preprocessing import image'
self.modelfile += '\\n'
self.modelfile += 'import numpy as np'
self.modelfile += '\\n'
self.modelfile += 'import tensorflow as tf'
self.modelfile += '\\n'
self.modelfile += 'from tensorflow.keras.layers import Input'
self.modelfile += '\\n'
self.modelfile += 'from tensorflow.keras.models import Model'
self.modelfile += '\\n'
self.modelfile += 'from tensorflow.keras.optimizers import Adam'
self.modelfile += '\\n'
self.modelfile += 'import cv2'
self.modelfile += '\\n'
if(learner_type == 'objectDetection'):
self.modelfile += 'import os\\n'
self.modelfile += 'from object_detection.utils import label_map_util\\n'
self.modelfile += 'from object_detection.utils import config_util\\n'
self.modelfile += 'from object_detection.utils import visualization_utils as viz_utils\\n'
self.modelfile += 'from object_detection.builders import model_builder\\n'
self.modelfile += 'import tensorflow as tf\\n'
self.modelfile += 'import numpy as np\\n'
self.modelfile += 'from PIL import Image\\n'
self.modelfile += 'import matplotlib.pyplot as plt\\n'
self.modelfile += 'import pandas as pd\\n'
self.modelfile += 'from pathlib import Path\\n'
if(learner_type == 'Text Similarity'):
self.modelfile += 'from tensorflow.keras.models import load_model'
self.modelfile += '\\n'
self.modelfile += 'from tensorflow.keras import backend as K'
self.modelfile += '\\n'
self.modelfile += 'from tensorflow.keras.preprocessing.sequence import pad_sequences'
self.modelfile += '\\n'
self.modelfile += 'from tensorflow.keras.preprocessing.text import Tokenizer'
self.modelfile += '\\n'
self.modelfile += 'import tensorflow as tf'
self.modelfile += '\\n'
if(model == 'Neural Architecture Search'):
self.modelfile += 'from tensorflow.keras.models import load_model'
self.modelfile += '\\n'
self.modelfile += 'from tensorflow.keras import backend as K'
self.modelfile += '\\n'
self.modelfile += 'import tensorflow as tf'
self.modelfile += '\\n'
self.modelfile += 'import joblib'
self.modelfile += '\\n'
self.modelfile += 'import os'
self.modelfile += '\\n'
self.modelfile += 'import pandas as pd'
self.modelfile += '\\n'
self.modelfile += 'from sklearn.decomposition import LatentDirichletAllocation\\n'
self.modelfile += 'import numpy as np\\n'
self.modelfile += 'from pathlib import Path\\n'
if model.lower() == 'deep q network' or model.lower() == 'dueling deep q network':
self.modelfile += 'from tensorflow import constant'
self.modelfile += '\\n'
self.modelfile += 'from tf_agents.trajectories import time_step'
self.modelfile += '\\n'
self.requirementfile += 'tensorflow==2.5.0'
if model.lower() == 'lstm' or model.lower() == 'mlp':
self.modelfile += 'from tensorflow.keras.models import load_model'
self.modelfile += '\\n'
self.requirementfile += 'tensorflow==2.5.0'
if(learner_type == 'Text Similarity'):
self.modelfile += 'def cosine_distance(vests):'
self.modelfile += '\\n';
self.modelfile += ' x, y = vests'
self.modelfile += '\\n';
self.modelfile += ' x = K.l2_normalize(x, axis=-1)'
self.modelfile += '\\n';
self.modelfile += ' y = K.l2_normalize(y, axis=-1)'
self.modelfile += '\\n';
self.modelfile += ' return -K.mean(x * y, axis=-1, keepdims=True)'
self.modelfile += '\\n';
self.modelfile += 'def cos_dist_output_shape(shapes):'
self.modelfile += '\\n';
self.modelfile += ' shape1, shape2 = shapes'
self.modelfile += '\\n';
self.modelfile += ' return (shape1[0],1)'
self.modelfile += '\\n';
if(learner_type == 'TextDL' or learner_type == 'DL'):
if(scoreParam.lower() == 'recall' or scoreParam.lower() == 'f1_score'):
self.modelfile += 'def recall_m(y_true, y_pred):'
self.modelfile += '\\n';
self.modelfile += ' true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))'
self.modelfile += '\\n';
self.modelfile += ' possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))'
self.modelfile += '\\n';
self.modelfile += ' recall = true_positives / (possible_positives + K.epsilon())'
self.modelfile += '\\n';
self.modelfile += ' return recall'
self.modelfile += '\\n';
if(scoreParam.lower() == 'precision' or scoreParam.lower() == 'f1_score'):
self.modelfile += 'def precision_m(y_true, y_pred):'
self.modelfile += '\\n';
self.modelfile += ' true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))'
self.modelfile += '\\n';
self.modelfile += ' predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))'
self.modelfile += '\\n';
                self.modelfile += '    precision = true_positives / (predicted_positives + K.epsilon())'
self.modelfile += '\\n';
self.modelfile += ' return precision'
self.modelfile += '\\n';
if(scoreParam.lower() == 'f1_score'):
self.modelfile += 'def f1_m(y_true, y_pred):'
self.modelfile += '\\n';
self.modelfile += ' precision = precision_m(y_true, y_pred)'
self.modelfile += '\\n';
self.modelfile += ' recall = recall_m(y_true, y_pred)'
self.modelfile += '\\n';
self.modelfile += ' return 2*((precision*recall)/(precision+recall+K.epsilon()))'
self.modelfile += '\\n';
if(scoreParam.lower() == 'rmse'):
self.modelfile += 'def rmse_m(y_true, y_pred):'
self.modelfile += '\\n';
self.modelfile += ' return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))'
self.modelfile += '\\n';
if(scoreParam.lower() =='r2'):
self.modelfile += 'def r_square(y_true, y_pred):'
self.modelfile += '\\n';
self.modelfile += ' SS_res = K.sum(K.square(y_true-y_pred))'
self.modelfile += '\\n';
self.modelfile += ' SS_tot = K.sum(K.square(y_true-K.mean(y_true)))'
self.modelfile += '\\n';
self.modelfile += ' return (1 - SS_res/(SS_tot+K.epsilon()))'
self.modelfile += '\\n';
if(learner_type.lower() in ['similarityidentification','contextualsearch']):
self.modelfile += 'from pathlib import Path\\n'
if model_type == 'BM25':
self.modelfile += 'from rank_bm25 import BM25Okapi\\n'
elif scoreParam == 'VectorDB Cosine':
self.modelfile += 'import chromadb\\n'
else:
self.modelfile += 'from sklearn.metrics.pairwise import cosine_similarity\\n'
        self.pythonpackage += '========== Python Packages Required ========='
self.pythonpackage += '\\n'
self.pythonpackage += 'scikit-learn'
self.pythonpackage += '\\n'
self.pythonpackage += 'scipy'
self.pythonpackage += '\\n'
self.pythonpackage += 'numpy'
self.pythonpackage += '\\n'
if((learner_type == 'DL') or (learner_type =='TextDL')):
self.modelfile += 'import numpy as np'
self.modelfile += '\\n'
self.requirementfile += 'scikit-learn==0.21.3'
self.requirementfile += '\\n'
self.requirementfile += 'scipy==1.3.3'
self.requirementfile += '\\n'
self.requirementfile += 'numpy==1.17.4'
self.requirementfile += '\\n'
if(learner_type == 'TextML'):
self.requirementfile += 'spacy==2.2.3'
self.requirementfile += '\\n'
self.requirementfile += 'https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.2.0/en_core_web_sm-2.2.0.tar.gz'
self.requirementfile += '\\n'
if(learner_type == 'DL' or learner_type == 'TextDL'):
self.requirementfile += 'keras==2.3.1'
self.requirementfile += '\\n'
self.requirementfile += 'tensorflow==2.0.0b1'
self.requirementfile += '\\n'
if(learner_type == 'RecommenderSystem'):
self.requirementfile += 'surprise'
self.requirementfile += '\\n'
if(method == 'package'):
self.modelfile += 'import surprise'
self.modelfile += '\\n'
self.modelfile += 'import statsmodels'
self.modelfile += '\\n'
self.requirementfile += 'statsmodels==0.10.2'
self.requirementfile += '\\n'
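    # Writes readme.txt into the deployment folder, describing the package layout and
    # how to invoke aion_prediction.py from Windows and Linux terminals.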
def crate_readme_file(self,deploy_path,modelfile,features,method,single_file=False):
self.readme='========== Files Structures =========='
self.readme+='\\n'
self.readme+=modelfile+' ------ Trained Model'
self.readme+='\\n'
self.readme+='aion_prediction.py --> Python package entry point'
self.readme+='\\n'
if not single_file:
self.readme+='script/inputprofiler.py --> Profiling like FillNA and Category to Numeric'
self.readme+='\\n'
self.readme+='script/selector.py --> Feature Selection'
self.readme+='\\n'
self.readme+='script/trained_model.py --> Read the model file and call the prediction'
self.readme+='\\n'
self.readme+='script/output_format.py --> Output formatter file'
self.readme+='\\n'
self.readme+= self.pythonpackage
self.readme+= '========== How to call the model =========='
self.readme+='\\n'
self.readme+= '============== From Windows Terminal =========='
self.readme+='\\n'
if method == 'optimus_package':
self.readme += 'python aion_prediction.py filename.json'
self.readme +='\\n'
self.readme += '========== Embedded Methods =========='
self.readme +='\\n'
self.readme += 'Function Name: predict_from_json - When input is Json Data'
self.readme +='\\n'
self.readme += 'Function Name: predict_from_file - When input is Json File'
self.readme +='\\n'
else:
callpython = 'python aion_prediction.py "[{'
for x in features:
                if(callpython != 'python aion_prediction.py "[{'):
callpython += ','
callpython += '\\\\\\"'+str(x)+'\\\\\\"'+':'+'\\\\\\"'+str(x)+'_value'+'\\\\\\"'
callpython += '}]"'
self.readme += callpython
self.readme+='\\n'
self.readme+= '============== From Linux Terminal =========='
self.readme+='\\n'
callpython = 'python aion_prediction.py \\'[{'
temp =callpython
for x in features:
if(callpython != temp):
callpython += ','
callpython += '"'+str(x)+'"'+':'+'"'+str(x)+'_value'+'"'
callpython += '}]\\''
self.readme += callpython
self.readme+='\\n'
self.readme+= '============== Output =========='
self.readme+='\\n'
        self.readme+= '{"status":"SUCCESS","data":[{"Data1":"Value","prediction":"Value"}]} ## For Single Row/Record'
self.readme+='\\n'
self.readme+= '{"status":"SUCCESS","data":[{"Data1":"Value","prediction":"Value"},{"Data1":"Value","prediction":"Value"}]} ## For Multiple Row/Record'
self.readme+='\\n'
self.readme+= '{"status":"ERROR","message":"description"} ## In Case Exception or Error'
self.readme+='\\n'
#print(self.readme)
filename = os.path.join(deploy_path,'readme.txt')
self.log.info('-------> Readme File Location: '+filename)
f = open(filename, "wb")
f.write(str(self.readme).encode('utf8'))
f.close()
def create_class(self,classname):
#self.modelfile += 'class '+classname+'(object):'
self.modelfile += 'class trained_model(object):'
self.modelfile += '\\n'
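    # Returns the source of a standalone inputprofiler class that replays the
    # training-time preprocessing at prediction time: word-to-number conversion,
    # the saved preprocess_pipe.pkl pipeline and, when configured, glove or fastText
    # text embeddings.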
def profiler_code(self,model_type,model,output_columns, features, text_feature,wordToNumericFeatures=[], deploy={},datetimeFeature=''):
profiler = deploy.get('profiler',{})
if isinstance(features, str):
features = features.split(',')
code = f"""
import scipy
import joblib
import numpy as np
import pandas as pd
from pathlib import Path
"""
if text_feature:
code += """
import importlib.util\\n"""
if wordToNumericFeatures:
code += """
from word2number import w2n
def s2n(value):
try:
x=eval(value)
return x
except:
try:
return w2n.word_to_num(value)
except:
return np.nan
"""
if 'code' in deploy.get('preprocess',{}).keys():
code += deploy['preprocess']['code']
if profiler.get('conversion_method','').lower() == 'glove':
code += """
class inputprofiler(object):
def __init__(self):
self.model = None
preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl'
if preprocess_path.exists():
self.model = joblib.load(preprocess_path)
from text.Embedding import load_pretrained
from text import TextProcessing
model_path = TextProcessing.checkAndDownloadPretrainedModel('glove')
embed_size, loaded_model = load_pretrained(model_path)
self.model.set_params(text_process__vectorizer__external_model = loaded_model)
else:
raise ValueError('Preprocess model not found')
def apply_profiler(self,df):
df = df.replace(r'^\\s*$', np.NaN, regex=True)
"""
elif profiler.get('conversion_method','').lower() == 'fasttext':
code += """
def get_pretrained_model_path():
try:
from AION.appbe.dataPath import DATA_DIR
modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing'
except:
modelsPath = Path('aion')/'PreTrainedModels'/'TextProcessing'
if not modelsPath.exists():
modelsPath.mkdir(parents=True, exist_ok=True)
return modelsPath
class inputprofiler(object):
def __init__(self):
self.model = None
preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl'
if preprocess_path.exists():
self.model = joblib.load(preprocess_path)
if not importlib.util.find_spec('fasttext'):
raise ValueError('fastText not installed')
else:
import os
import fasttext
import fasttext.util
cwd = os.getcwd()
os.chdir(get_pretrained_model_path())
fasttext.util.download_model('en', if_exists='ignore')
loaded_model = fasttext.load_model('cc.en.300.bin')
os.chdir(cwd)
self.model.set_params(text_process__vectorizer__external_model = loaded_model)
self.model.set_params(text_process__vectorizer__external_model_type = 'binary')
else:
raise ValueError('Preprocess model not found')
def apply_profiler(self,df):
df = df.replace(r'^\\s*$', np.NaN, regex=True)
"""
else:
code += """
class inputprofiler(object):
def __init__(self):
self.model = None
preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl'
if preprocess_path.exists():
self.model = joblib.load(preprocess_path)
else:
raise ValueError('Preprocess model not found')
def apply_profiler(self,df):
df = df.replace(r'^\\s*$', np.NaN, regex=True)
"""
if 'code' in deploy.get('preprocess',{}).keys():
code += " df = preprocess( df)\\n"
if wordToNumericFeatures:
code += f"""
df[{wordToNumericFeatures}] = df[{wordToNumericFeatures}].apply(lambda x: s2n(x))"""
if profiler.get('unpreprocessed_columns'):
code += f"""
unpreprocessed_data = df['{profiler['unpreprocessed_columns'][0]}']
df.drop(['{profiler['unpreprocessed_columns'][0]}'], axis=1,inplace=True)
"""
if profiler.get('force_numeric_conv'):
code += f"""
df[{profiler['force_numeric_conv']}] = df[{profiler['force_numeric_conv']}].apply(pd.to_numeric,errors='coerce')
"""
code += f"""
if self.model:
df = self.model.transform(df)"""
code += f"""
columns = {output_columns}
if isinstance(df, scipy.sparse.spmatrix):
df = pd.DataFrame(df.toarray(), columns=columns)
else:
df = pd.DataFrame(df, columns=columns)
"""
        ## The check below avoids restoring the unpreprocessed column for anomaly detection with a datetime feature, where it is not used.
if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na':
pass
else:
if profiler.get('unpreprocessed_columns'):
code += f"""
df['{profiler.get('unpreprocessed_columns')[0]}'] = unpreprocessed_data
"""
if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na':
            ## The set_index below is wrong because the datetime feature is dropped before profiling, so it stays commented out.
# code += f"""
# df.set_index('{datetimeFeature}', inplace=True)"""
code += f"""
return(df,'{datetimeFeature}')\\n"""
else:
code += f"""
return(df)"""
return code
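    # Minimal inputprofiler used when no preprocessing pipeline is required: it only
    # normalises blank strings to NaN and selects the expected feature columns.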
def no_profiling_code(self, features):
if isinstance(features, str):
features = features.split(',')
return f"""
import pandas as pd
import numpy as np
class inputprofiler(object):
def apply_profiler(self,df):
df = df.replace(r'^\\s*$', np.NaN, regex=True)
return df[{features}]
"""
    def create_profiler_file(self,learner_type,deploy_path,profiler,features,numericToLabel_json,column_merge_flag,text_features,preprocessing_pipe,firstDocFeature,secondDocFeature,normalizer,normFeatures,wordToNumericFeatures,conversion_method,model_type,preprocess_pipe,preprocess_out_columns, label_encoder,model, config=None,datetimeFeature=''):
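        # Writes script/inputprofiler.py for the deployment package. When profiler details
        # are available in config, the compact profiler_code()/no_profiling_code() templates
        # are used; otherwise the legacy line-by-line generation below builds the script.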
filename = str(Path(deploy_path)/'script'/'inputprofiler.py')
if 'profiler' in config:
if model_type == 'BM25':
code = self.profiler_code(model_type,model,['tokenize'],features, text_features,config['profiler']['word2num_features'])
elif model == 'KaplanMeierFitter':
code = self.no_profiling_code(features)
elif model.lower() in ['arima', 'fbprophet']: #task 12627
code = self.no_profiling_code('noofforecasts')
else:
code = self.profiler_code(model_type,model,config['profiler']['output_features'],features, text_features,config['profiler']['word2num_features'],config,datetimeFeature)
if code:
with open(filename,'w',encoding="utf-8") as f:
f.write(code)
self.log.info('-------> Profiler File Location :'+filename)
return
self.profilerfile += 'import pandas as pd'
self.profilerfile += '\\n'
self.profilerfile += 'import joblib'
self.profilerfile += '\\n'
self.profilerfile += 'import os'
self.profilerfile += '\\n'
self.profilerfile += 'from word2number import w2n'
self.profilerfile += '\\n'
self.profilerfile += 'import numpy as np'
self.profilerfile += '\\nfrom pathlib import Path\\n'
#print("1")
#print(profiler)
if(learner_type == 'Text Similarity' or len(text_features) > 0):
self.profilerfile += 'from text import TextProcessing'
self.profilerfile += '\\n'
self.profilerfile += 'def textCleaning(textCorpus):'
self.profilerfile += '\\n'
self.profilerfile += ' textProcessor = TextProcessing.TextProcessing()'
self.profilerfile += '\\n'
self.profilerfile += ' textCorpus = textProcessor.transform(textCorpus)'
self.profilerfile += '\\n'
self.profilerfile += ' return(textCorpus)'
self.profilerfile += '\\n'
self.profilerfile += 'class inputprofiler(object):'
self.profilerfile += '\\n'
self.profilerfile += ' def s2n(self,value):'
self.profilerfile += '\\n'
self.profilerfile += ' try:'
self.profilerfile += '\\n'
self.profilerfile += ' x=eval(value)'
self.profilerfile += '\\n'
self.profilerfile += ' return x'
self.profilerfile += '\\n'
self.profilerfile += ' except:'
self.profilerfile += '\\n'
self.profilerfile += ' try:'
self.profilerfile += '\\n'
self.profilerfile += ' return w2n.word_to_num(value)'
self.profilerfile += '\\n'
self.profilerfile += ' except:'
self.profilerfile += '\\n'
self.profilerfile += ' return np.nan '
self.profilerfile += '\\n'
self.profilerfile += ' def apply_profiler(self,df):'
self.profilerfile += '\\n'
if(len(wordToNumericFeatures) > 0):
for w2nFeature in wordToNumericFeatures:
if w2nFeature not in features:
continue
self.profilerfile += " df['"+w2nFeature+"']=df['"+w2nFeature+"'].apply(lambda x: self.s2n(x))"
self.profilerfile += '\\n'
self.profilerfile += " df = df.replace(r'^\\s*$', np.NaN, regex=True)"
self.profilerfile += '\\n'
self.profilerfile += ' try:'
self.profilerfile += '\\n'
self.profilerfile += ' df.dropna(how="all",axis=1,inplace=True)'
self.profilerfile += '\\n'
self.profilerfile += ' except:'
self.profilerfile += '\\n'
self.profilerfile += ' df.fillna(0)'
self.profilerfile += '\\n'
if model_type.lower() != 'timeseriesforecasting': #task 11997
self.profilerfile += ' preprocess_path = Path(__file__).parent.parent/"model"/"preprocess_pipe.pkl"\\n'
self.profilerfile += ' if preprocess_path.exists():\\n'
self.profilerfile += ' model = joblib.load(preprocess_path)\\n'
if model_type.lower()=='anomaly_detection' and model.lower() == 'autoencoder':
self.profilerfile += f" df[{features}] = model.transform(df[{features}])\\n"
else:
self.profilerfile += f" df = model.transform(df)\\n"
if 'operation' in profiler:
y = profiler['operation']
for action in y:
feature = action['feature']
#if feature not in features:
# continue
operation = action['Action']
if(operation == 'Drop'):
self.profilerfile += " if '"+feature+"' in df.columns:"
self.profilerfile += '\\n'
self.profilerfile += " df.drop(columns=['"+feature+"'],inplace = True)"
self.profilerfile += '\\n'
if(operation == 'FillValue'):
self.profilerfile += " if '"+feature+"' in df.columns:"
self.profilerfile += '\\n'
fvalue = action['value']
self.profilerfile += " df['"+feature+"'] = df['"+feature+"'].fillna(value='"+fvalue+"')"
self.profilerfile += '\\n'
if(operation == 'Encoder'):
value = action['value']
value = value.replace("\\n", "\\\\n")
self.profilerfile += " if '"+feature+"' in df.columns:"
self.profilerfile += '\\n'
self.profilerfile += " le_dict="+str(value)
self.profilerfile += '\\n'
self.profilerfile += " df['"+feature+"'] = df['"+feature+"'].apply(lambda x: le_dict.get(x,-1))"
self.profilerfile += '\\n'
self.profilerfile += " if -1 in df['"+feature+"'].values:"
self.profilerfile += '\\n'
self.profilerfile += " raise Exception('Category value of "+feature+" not present in training data')"
self.profilerfile += '\\n'
if 'conversion' in profiler:
catergoryConverton = profiler['conversion']
#print(catergoryConverton)
if (catergoryConverton['categoryEncoding'].lower() in ['targetencoding','onehotencoding']) and ('features' in catergoryConverton):
self.profilerfile += " encoder = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','categoryEncoder.pkl'))"
self.profilerfile += '\\n'
self.profilerfile += " CategoryFeatures = "+str(catergoryConverton['features'])
self.profilerfile += '\\n'
if catergoryConverton['categoryEncoding'].lower() == 'onehotencoding':
self.profilerfile += " transformed_data = encoder.transform(df[CategoryFeatures]).toarray()"
self.profilerfile += '\\n'
self.profilerfile += " feature_labels = encoder.get_feature_names(CategoryFeatures)"
self.profilerfile += '\\n'
self.profilerfile += " transformed_data = pd.DataFrame(transformed_data,columns=feature_labels) "
self.profilerfile += '\\n'
else:
self.profilerfile += " transformed_data = encoder.transform(df[CategoryFeatures])"
self.profilerfile += '\\n'
self.profilerfile += " dataColumns=list(df.columns)"
self.profilerfile += '\\n'
self.profilerfile += " nonNormFeatures=list(set(dataColumns) - set(CategoryFeatures))"
self.profilerfile += '\\n'
self.profilerfile += " dataArray=df[nonNormFeatures]"
self.profilerfile += '\\n'
self.profilerfile += " df = pd.concat([dataArray, transformed_data],axis=1)"
self.profilerfile += '\\n'
y = json.loads(numericToLabel_json)
for feature_details in y:
feature = feature_details['feature']
if feature not in features:
continue
label = feature_details['Labels']
bins = feature_details['Bins']
self.profilerfile += " if '"+feature+"' in df.columns:"
self.profilerfile += '\\n'
self.profilerfile += " cut_bins="+str(bins)
self.profilerfile += '\\n'
self.profilerfile += " cut_labels="+str(label)
self.profilerfile += '\\n'
self.profilerfile += " df['"+feature+"'] = pd.cut(df['"+feature+"'],bins=cut_bins,labels=cut_labels)"
self.profilerfile += '\\n'
self.profilerfile += " df['"+feature+"'] = df['"+feature+"'].fillna(value=0)"
self.profilerfile += '\\n'
if(len(text_features) > 0):
if(len(text_features) > 1):
self.profilerfile += ' merge_features = '+str(text_features)
self.profilerfile += '\\n'
self.profilerfile += ' df[\\'combined\\'] = df[merge_features].apply(lambda row: \\' \\'.join(row.values.astype(str)), axis=1)'
self.profilerfile += '\\n'
self.profilerfile += ' features = [\\'combined\\']'
self.profilerfile += '\\n'
else:
self.profilerfile += " features = "+str(text_features)
self.profilerfile += '\\n'
if model_type == 'BM25':
self.profilerfile += """\\
df_text = df[features[0]]
pipe = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','{preprocessing_pipe}'))
df['tokenize'] = pipe.transform(df_text)\\n""".format(preprocessing_pipe=preprocessing_pipe)
elif conversion_method == 'sentenceTransformer':
self.profilerfile += """\\
df_text = df[features[0]]
from sentence_transformers import SentenceTransformer
model = SentenceTransformer(\\'sentence-transformers/msmarco-distilroberta-base-v2\\')
df_vect = model.encode(df_text)
for empCol in {text_features}:
df = df.drop(columns=[empCol])
if isinstance(df_vect, np.ndarray):
df1 = pd.DataFrame(df_vect)
else:
df1 = pd.DataFrame(df_vect.toarray(),columns = pipe.named_steps[\\'vectorizer\\'].get_feature_names())
df1 = df1.add_suffix(\\'_vect\\')
df = pd.concat([df, df1],axis=1)\\n""".format(preprocessing_pipe=preprocessing_pipe, text_features=text_features)
else:
self.profilerfile += """\\
df_text = df[features[0]]
pipe = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','{preprocessing_pipe}'))
df_vect=pipe.transform(df_text)
for empCol in {text_features}:
df = df.drop(columns=[empCol])
if isinstance(df_vect, np.ndarray):
df1 = pd.DataFrame(df_vect)
else:
df1 = pd.DataFrame(df_vect.toarray(),columns = pipe.named_steps[\\'vectorizer\\'].get_feature_names())
df1 = df1.add_suffix(\\'_vect\\')
df = pd.concat([df, df1],axis=1)\\n""".format(preprocessing_pipe=preprocessing_pipe, text_features=text_features)
if(learner_type == 'Text Similarity'):
self.profilerfile += ' df[\\''+firstDocFeature+'\\'] = textCleaning(df[\\''+firstDocFeature+'\\'])'
self.profilerfile += '\\n'
self.profilerfile += ' df[\\''+secondDocFeature+'\\'] = textCleaning(df[\\''+secondDocFeature+'\\'])'
self.profilerfile += '\\n'
if len(normFeatures) > 0 and normalizer != '':
self.profilerfile += " normFeatures = "+str(normFeatures)
self.profilerfile += '\\n'
self.profilerfile += ' normalizepipe = joblib.load(os.path.join(os.path.dirname(os.path.abspath(__file__)),\\'..\\',\\'model\\',\\''+normalizer+'\\'))'
self.profilerfile += '\\n'
self.profilerfile += ' dataColumns=list(df.columns)'
self.profilerfile += '\\n'
self.profilerfile += ' nonNormFeatures=list(set(dataColumns) - set(normFeatures))'
self.profilerfile += '\\n'
self.profilerfile += ' dataframe=df[normFeatures]'
self.profilerfile += '\\n'
self.profilerfile += ' transDf = normalizepipe.transform(dataframe)'
self.profilerfile += '\\n'
self.profilerfile += ' nontransDF=df[nonNormFeatures].values'
self.profilerfile += '\\n'
self.profilerfile += ' dataColumns=normFeatures+nonNormFeatures'
self.profilerfile += '\\n'
self.profilerfile += ' scaledDf = pd.DataFrame(np.hstack((transDf, nontransDF)),columns=dataColumns)'
self.profilerfile += '\\n'
self.profilerfile += ' df=scaledDf'
self.profilerfile += '\\n'
else:
self.profilerfile += ' df=df.dropna()\\n'
self.profilerfile += ' return(df)'
filename = os.path.join(deploy_path,'script','inputprofiler.py')
        self.log.info('-------> Profiler File Location :'+filename)
f = open(filename, "w",encoding="utf-8")
f.write(str(self.profilerfile))
f.close()
def isEnglish(self, s):
try:
s.encode(encoding='utf-8').decode('ascii')
exce |
== 'classification' or model_type.lower() == 'regression' or model_type.lower() == 'timeseriesforecasting': #task 11997
predictionObj.create_drift_file(deploy_path,features,targetFeature,model_type)
if model_type.lower() = |
.params['features']['text_feat']:
obj.create_text_drift_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat'],self.name)
else:
obj.create_drift_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat'],self.name)
def create_odrift(self):
obj = aionPrediction()
if self.params['features']['text_feat']:
obj.create_regression_text_performance_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat'])
else:
obj.create_regression_performance_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat'])
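    # Emits the trainer class for the deployed package: __init__ loads the saved model
    # artifact (Keras load_model or joblib, depending on the algorithm) and run()
    # returns predictions for a prepared dataframe.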
def training_code( self):
        self.importer.addModule(module='pandas',mod_as='pd')
        self.importer.addModule(module='numpy',mod_as='np')
code = f"""
class trainer():
"""
init_code = f"""
def __init__( self):
model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}"
if not model_file.exists():
raise ValueError(f'Trained model file not found: {{model_file}}')
"""
run_code = f"""
def run(self, df):\\
"""
if self.params['training']['algo'] in ['Neural Architecture Search']:
self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models')
self.importer.addModule(module='autokeras',mod_as='ak')
init_code += f"""
self.model = load_model(model_file,custom_objects=ak.CUSTOM_OBJECTS)
"""
run_code += """
df = df.astype(np.float32)
return self.model.predict(df).reshape(1, -1)
"""
elif self.params['training']['algo'] in ['Neural Network','Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)']:
self.importer.addModule(module='numpy',mod_as='np')
self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models')
init_code += f"""
self.model = load_model(model_file)
"""
run_code += """
df = np.expand_dims(df, axis=2)
df = df.astype(np.float32)
return self.model.predict(df).reshape(1, -1)
"""
else:
self.importer.addModule('joblib')
init_code += f"""
self.model = joblib.load(model_file)
"""
run_code += """
df = df.astype(np.float32)
return self.model.predict(df).reshape(1, -1)
"""
return code + init_code + run_code
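    # Emits the output_format class that rounds predictions and wraps them in the
    # standard {"status": "SUCCESS", "data": [...]} JSON envelope.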
def formatter_code(self):
self.importer.addModule('json')
self.importer.addModule('pandas', mod_as='pd')
return """
class output_format():
def __init__(self):
pass
def run(self, raw_df, output):
raw_df['prediction'] = output[0]
raw_df['prediction'] = raw_df['prediction'].round(2)
outputjson = raw_df.to_json(orient='records',double_precision=5)
outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}
return(json.dumps(outputjson))
"""
class clustering( deployer):
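    # Deployer specialisation for clustering use cases: DBSCAN models are re-fit on
    # the incoming data via fit_predict(), while other clustering algorithms use predict().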
def __init__(self, params={}):
super().__init__( params)
self.feature_reducer = False
if not self.name:
self.name = 'clustering'
def training_code( self):
self.importer.addModule('joblib')
self.importer.addModule(module='pandas',mod_as='pd')
code = f"""
class trainer():
"""
init_code = f"""
def __init__( self):
model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}"
if not model_file.exists():
raise ValueError(f'Trained model file not found: {{model_file}}')
"""
run_code = f"""
def run(self, df):\\
"""
if self.params['training']['algo'] == 'DBSCAN':
init_code += f"""
self.model = joblib.load(model_file)
"""
run_code += """
return self.model.fit_predict(df)
"""
else:
init_code += f"""
self.model = joblib.load(model_file)
"""
run_code += """
return self.model.predict(df).reshape(1, -1)
"""
return code + init_code + run_code
def formatter_code(self):
self.importer.addModule('json')
self.importer.addModule('pandas', mod_as='pd')
return """
class output_format():
def __init__(self):
pass
def run(self, raw_df, output):
raw_df['prediction'] = output[0]
raw_df['prediction'] = raw_df['prediction'].round(2)
outputjson = raw_df.to_json(orient='records',double_precision=2)
outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}
return(json.dumps(outputjson))
"""
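# How the generated pieces are expected to compose at prediction time (hypothetical
# sketch; the actual entry point script is generated elsewhere in this module, and
# profiled_df is assumed to come from the generated profiling step):
#   out = trainer().run(profiled_df)          # trainer / output_format as emitted above
#   print(output_format().run(raw_df, out))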
if __name__ == '__main__':
config = {'usecase_name': 'AI0110', 'usecase_ver': '1', 'features': {'input_feat': ['v2'], 'target_feat': 'v1', 'text_feat': ['v2']}, 'paths': {'deploy': r'C:/Users/vashistah/AppData/Local/Programs/HCLTech/AION/data/target/AI0110/1', 'usecase': r'C:/Users/vashistah/AppData/Local/Programs/HCLTech/AION/data/target/AI0110'}, 'profiler': {'input_features': ['v2'], 'output_features': ['07xxxxxxxxx_vect', '08700621170150p_vect', '08702840625comuk_vect', '08718726270150gbpmtmsg18_vect', '1000s_vect', '10am7pm_vect', '10k_vect', '10p_vect', '10pmin_vect', '10ppm_vect', '11mths_vect', '125gift_vect', '12hrs_vect', '12mths_vect', '150p_vect', '150perwksub_vect', '150pm_vect', '150pmin_vect', '150pmsg_vect', '150pmsgrcvdhgsuite3422landsroww1j6hl_vect', '150pmtmsgrcvd18_vect', '150ppm_vect', '150ptone_vect', '150pwk_vect', '150week_vect', '16only_vect', '18only_vect', '1hr_vect', '1minmobsmorelkpobox177hp51fl_vect', '1st_vect', '1x150pwk_vect', '20p_vect', '20pmin_vect', '21st_vect', '220cm2_vect', '24hrs_vect', '25p_vect', '26th_vect', '2day_vect', '2find_vect', '2geva_vect', '2go_vect', '2marrow_vect', '2mrw_vect', '2nd_vect', '2nite_vect', '2optout_vect', '2p_vect', '2u_vect', '2waxsto_vect', '2wks_vect', '300p_vect', '31pmsg_vect', '3510i_vect', '3d_vect', '3g_vect', '3gbp_vect', '3hrs_vect', '3mins_vect', '3qxj9_vect', '3rd_vect', '3ss_vect', '3u_vect', '3uz_vect', '3wk_vect', '40gb_vect', '4a_vect', '4d_vect', '4eva_vect', '4get_vect', '4info_vect', '4mths_vect', '4th_vect', '4u_vect', '50p_vect', '5min_vect', '5pm_vect', '5wb_vect', '5we_vect', '60pmin_vect', '6hrs_vect', '6months_vect', '6pm_vect', '7250i_vect', '7ish_vect', '8am_vect', '8pm_vect', '8th_vect', '8wp_vect', '9ae_vect', '9ja_vect', '9pm_vect', '9t_vect', 'aathi_vect', 'abi_vect', 'ability_vect', 'abiola_vect', 'able_vect', 'abt_vect', 'abta_vect', 'aburo_vect', 'ac_vect', 'academic_vect', 'acc_vect', 'accept_vect', 'access_vect', 'accident_vect', 'accidentally_vect', 'accordingly_vect', 'account_vect', 'ache_vect', 'across_vect', 'acted_vect', 'action_vect', 'activate_vect', 'activities_vect', 'actor_vect', 'actual_vect', 'actually_vect', 'ad_vect', 'adam_vect', 'add_vect', 'added_vect', 'addicted_vect', 'addie_vect', 'address_vect', 'admin_vect', 'administrator_vect', 'admirer_vect', 'admit_vect', 'adore_vect', 'adoring_vect', 'ads_vect', 'adult_vect', 'advance_vect', 'adventure_vect', 'advice_vect', 'advise_vect', 'affair_vect', 'affairs_vect', 'affectionate_vect', 'afraid_vect', 'aft_vect', 'afternoon_vect', 'aftr_vect', 'agalla_vect', 'age_vect', 'age16_vect', 'ages_vect', 'ago_vect', 'agree_vect', 'ah_vect', 'aha_vect', 'ahead_vect', 'ahmad_vect', 'ai_vect', 'aight_vect', 'aint_vect', 'air_vect', 'airport_vect', 'airtel_vect', 'aiya_vect', 'aiyah_vect', 'aiyar_vect', 'aiyo_vect', 'al_vect', 'album_vect', 'alert_vect', 'alex_vect', 'alfie_vect', 'ali_vect', 'allah_vect', 'allow_vect', 'allowed_vect', 'almost_vect', 'alone_vect', 'along_vect', 'already_vect', 'alright_vect', 'alrite_vect', 'also_vect', 'always_vect', 'alwys_vect', 'amazing_vect', 'american_vect', 'among_vect', 'amount_vect', 'amp_vect', 'amt_vect', 'andros_vect', 'angry_vect', 'annie_vect', 'anniversary_vect', 'announcement_vect', 'anot_vect', 'another_vect', 'ans_vect', 'ansr_vect', 'answer_vect', 'answered_vect', 'answering_vect', 'answers_vect', 'anthony_vect', 'anti_vect', 'anybody_vect', 'anymore_vect', 'anyone_vect', 'anything_vect', 'anytime_vect', 'anyway_vect', 'anyways_vect', 'apartment_vect', 'app_vect', 'apparently_vect', 'applebees_vect', 
'apply_vect', 'appointment_vect', 'appreciate_vect', 'appreciated_vect', 'approx_vect', 'apps_vect', 'appt_vect', 'april_vect', 'ar_vect', 'arcade_vect', 'ard_vect', 'area_vect', 'argh_vect', 'argument_vect', 'arm_vect', 'armand_vect', 'arms_vect', 'around_vect', 'arrange_vect', 'arrested_vect', 'arrive_vect', 'arsenal_vect', 'art_vect', 'arun_vect', 'asap_vect', 'ashley_vect', 'ask_vect', 'askd_vect', 'asked_vect', 'askin_vect', 'asking_vect', 'asks_vect', 'asleep_vect', 'ass_vect', 'assume_vect', 'ate_vect', 'atlanta_vect', 'atlast_vect', 'atm_vect', 'attached_vect', 'attempt_vect', 'attend_vect', 'auction_vect', 'august_vect', 'aunt_vect', 'aunty_vect', 'auto_vect', 'av_vect', 'available_vect', 'avatar_vect', 'ave_vect', 'avent_vect', 'avoid_vect', 'await_vect', 'awaiting_vect', 'awake_vect', 'award_vect', 'awarded_vect', 'away_vect', 'awesome_vect', 'aww_vect', 'b4_vect', 'ba_vect', 'babe_vect', 'babes_vect', 'babies_vect', 'baby_vect', 'back_vect', 'bad_vect', 'bag_vect', 'bags_vect', 'bahamas_vect', 'bak_vect', 'balance_vect', 'bank_vect', 'banks_vect', 'bar_vect', 'barely_vect', 'basic_vect', 'basically_vect', 'bat_vect', 'bath_vect', 'bathe_vect', 'bathing_vect', 'battery_vect', 'bay_vect', 'bb_vect', 'bc_vect', 'bck_vect', 'bcoz_vect', 'bday_vect', 'be_vect', 'bears_vect', 'beautiful_vect', 'beauty_vect', 'bec_vect', 'become_vect', 'becoz_vect', 'bed_vect', 'bedrm_vect', 'bedroom_vect', 'beer_vect', 'befor_vect', 'beg_vect', 'begin |
_vect', 'behave_vect', 'behind_vect', 'bein_vect', 'believe_vect', 'bell_vect', 'belly_vect', 'belovd_vect', 'best_vect', 'bet_vect', 'better_vect', 'beyond_vect', 'bf_vect', 'bid_vect', 'bids_vect', 'big_vect', 'bigger_vect', 'biggest_vect', 'bill_vect', 'billed_vect', 'billion_vect', 'bills_vect', 'bin_vect', 'biola_vect', 'birds_vect', 'birla_vect', 'birth_vect', 'birthdate_vect', 'birthday_vect', 'bishan_vect', 'bit_vect', 'bitch_vect', 'bite_vect', 'black_vect', 'blackberry_vect', 'blah_vect', 'blake_vect', 'blank_vect', 'bleh_vect', 'bless_vect', 'blessing_vect', 'bloo_vect', 'blood_vect', 'bloody_vect', 'blue_vect', 'bluetooth_vect', 'bluff_vect', 'boat_vect', 'body_vect', 'bold_vect', 'bone_vect', 'bonus_vect', 'boo_vect', 'book_vect', 'booked_vect', 'booking_vect', 'books_vect', 'boost_vect', 'booty_vect', 'bored_vect', 'boring_vect', 'born_vect', 'boss_vect', 'boston_vect', 'bother_vect', 'bottom_vect', 'bought_vect', 'bout_vect', 'bowl_vect', 'box_vect', 'box326_vect', 'box334sk38ch_vect', 'box97n7qp_vect', 'boy_vect', 'boye_vect', 'boyfriend_vect', 'boys_vect', 'boytoy_vect', 'brah_vect', 'brand_vect', 'bread_vect', 'break_vect', 'breathe_vect', 'bright_vect', 'brilliant_vect', 'bring_vect', 'bringing_vect', 'brings_vect', 'british_vect', 'bro_vect', 'broad_vect', 'broke_vect', 'broken_vect', 'bros_vect', 'brothas_vect', 'brother_vect', 'brought_vect', 'bruv_vect', 'bslvyl_vect', 'bt_vect', 'btnationalrate_vect', 'btw_vect', 'bucks_vect', 'bud_vect', 'budget_vect', 'buff_vect', 'buffet_vect', 'bugis_vect', 'building_vect', 'buns_vect', 'burger_vect', 'burns_vect', 'bus_vect', 'buses_vect', 'business_vect', 'busy_vect', 'butt_vect', 'buy_vect', 'buying_vect', 'buzz_vect', 'bx420_vect', 'bx420ip45we_vect', 'bye_vect', 'ca_vect', 'cabin_vect', 'cafe_vect', 'cake_vect', 'cal_vect', 'calculation_vect', 'calicut_vect', 'california_vect', 'call_vect', 'call2optout674_vect', 'callback_vect', 'callcost_vect', 'called_vect', 'caller_vect', 'callers_vect', 'callertune_vect', 'callin_vect', 'calling_vect', 'calls_vect', 'callså_vect', 'cam_vect', 'camcorder_vect', 'came_vect', 'camera_vect', 'cameravideo_vect', 'campus_vect', 'can_vect', 'canada_vect', 'canal_vect', 'canary_vect', 'cancel_vect', 'cancelled_vect', 'cancer_vect', 'cant_vect', 'captain_vect', 'car_vect', 'card_vect', 'cardiff_vect', 'care_vect', 'cared_vect', 'career_vect', 'careful_vect', 'carefully_vect', 'caring_vect', 'carlos_vect', 'caroline_vect', 'cars_vect', 'cartoon_vect', 'case_vect', 'cash_vect', 'cashbalance_vect', 'cashin_vect', 'castor_vect', 'cat_vect', 'catch_vect', 'catching_vect', 'caught_vect', 'cause_vect', 'cbe_vect', 'cc_vect', 'cd_vect', 'cdgt_vect', 'cds_vect', 'celebrate_vect', 'celebration_vect', 'cell_vect', 'center_vect', 'centre_vect', 'certainly_vect', 'cha_vect', 'chain_vect', 'challenge_vect', 'chance_vect', 'change_vect', 'changed_vect', 'changes_vect', 'channel_vect', 'character_vect', 'charge_vect', 'charged_vect', 'charges_vect', 'charity_vect', 'charles_vect', 'chase_vect', 'chasing_vect', 'chat_vect', 'chatting_vect', 'cheap_vect', 'cheaper_vect', 'cheat_vect', 'chechi_vect', 'check_vect', 'checked_vect', 'checking_vect', 'cheers_vect', 'chennai_vect', 'cherish_vect', 'chest_vect', 'chicken_vect', 'chikku_vect', 'child_vect', 'childish_vect', 'children_vect', 'chill_vect', 'chillin_vect', 'china_vect', 'chinese_vect', 'chip_vect', 'chocolate_vect', 'choice_vect', 'choose_vect', 'chosen_vect', 'christ_vect', 'christmas_vect', 'church_vect', 'cine_vect', 'cinema_vect', 'citizen_vect', 
'city_vect', 'claim_vect', 'claims_vect', 'claire_vect', 'class_vect', 'classes_vect', 'clean_vect', 'cleaning_vect', 'clear_vect', 'clearly_vect', 'click_vect', 'clock_vect', 'close_vect', 'closed_vect', 'closer_vect', 'closes_vect', 'clothes_vect', 'club_vect', 'cn_vect', 'co_vect', 'coast_vect', 'coat_vect', 'cochin_vect', 'code_vect', 'coffee_vect', 'coin_vect', 'coins_vect', 'cold_vect', 'colleagues_vect', 'collect_vect', 'collected_vect', 'collecting_vect', 'collection_vect', 'college_vect', 'colour_vect', 'come_vect', 'comedy_vect', 'comes_vect', 'comin_vect', 'coming_vect', 'commercial_vect', 'common_vect', 'community_vect', 'comp_vect', 'company_vect', 'competition_vect', 'complete_vect', 'completed_vect', 'completely_vect', 'complimentary_vect', 'computer_vect', 'concentrate_vect', 'concert_vect', 'conditions_vect', 'conducts_vect', 'confidence_vect', 'confirm_vect', 'congrats_vect', 'congratulations_vect', 'connection_vect', 'consider_vect', 'considering_vect', 'constant_vect', 'constantly_vect', 'contact_vect', 'contacted_vect', 'contacts_vect', 'content_vect', 'contents_vect', 'continue_vect', 'contract_vect', 'control_vect', 'convey_vect', 'convinced_vect', 'cool_vect', 'coping_vect', 'copy_vect', 'cornwall_vect', 'correct_vect', 'cos_vect', 'cost_vect', 'costa_vect', 'costs_vect', 'costå_vect', 'could_vect', 'count_vect', 'countin_vect', 'country_vect', 'couple_vect', 'course_vect', 'cover_vect', 'coz_vect', 'cr9_vect', 'cramps_vect', 'crave_vect', 'crazy_vect', 'created_vect', 'credit_vect', 'credited_vect', 'credits_vect', 'creepy_vect', 'crisis_vect', 'crore_vect', 'cross_vect', 'croydon_vect', 'cruise_vect', 'cry_vect', 'cs_vect', 'csbcm4235wc1n3xx_vect', 'csstop_vect', 'cud_vect', 'cuddling_vect', 'cum_vect', 'cup_vect', 'curious_vect', 'current_vect', 'currently_vect', 'cust_vect', 'custcare_vect', 'custcare08718720201_vect', 'custom_vect', 'customer_vect', 'customers_vect', 'cut_vect', 'cute_vect', 'cutting_vect', 'cuz_vect', 'cw25wx_vect', 'da_vect', 'dad_vect', 'daddy_vect', 'daily_vect', 'damn_vect', 'dance_vect', 'dancing_vect', 'dare_vect', 'dark_vect', 'darlin_vect', 'darling_vect', 'darren_vect', 'dat_vect', 'date_vect', 'dates_vect', 'dating_vect', 'dave_vect', 'day_vect', 'days_vect', 'de_vect', 'dead_vect', 'deal_vect', 'dealer_vect', 'dealing_vect', 'dear_vect', 'dearly_vect', 'death_vect', 'decide_vect', 'decided_vect', 'decimal_vect', 'decision_vect', 'deep_vect', 'def_vect', 'definite_vect', 'definitely_vect', 'del_vect', 'delete_vect', 'deleted_vect', 'delhi_vect', 'deliver_vect', 'delivered_vect', 'deliveredtomorrow_vect', 'delivery_vect', 'dem_vect', 'demand_vect', 'den_vect', 'denis_vect', 'department_vect', 'depends_vect', 'depressed_vect', 'derek_vect', 'desires_vect', 'desperate_vect', 'details_vect', 'dey_vect', 'dhoni_vect', 'dial_vect', 'dick_vect', 'dictionary_vect', 'didn_vect', 'didnt_vect', 'didt_vect', 'die_vect', 'died_vect', 'diet_vect', 'different_vect', 'difficult_vect', 'digital_vect', 'dignity_vect', 'din_vect', 'dinner_vect', 'dint_vect', 'direct_vect', 'directly_vect', 'dirty_vect', 'dis_vect', 'discount_vect', 'discuss_vect', 'dislikes_vect', 'display_vect', 'distance_vect', 'distract_vect', 'disturb_vect', 'division_vect', 'dload_vect', 'dnt_vect', 'doc_vect', 'docs_vect', 'doctor_vect', 'doesnt_vect', 'dog_vect', 'dogging_vect', 'doggy_vect', 'doin_vect', 'dollars_vect', 'don_vect', 'done_vect', 'dont_vect', 'donåõt_vect', 'door_vect', 'dorm_vect', 'double_vect', 'dough_vect', 'download_vect', 'downloads_vect', 'draw_vect', 
'dream_vect', 'dreams_vect', 'dress_vect', 'dressed_vect', 'dresser_vect', 'drink_vect', 'drinking_vect', 'drinks_vect', 'drive_vect', 'driver_vect', 'drivin_vect', 'driving_vect', 'drop_vect', 'dropped_vect', 'drug_vect', 'drugs_vect', 'drunk_vect', 'dry_vect', 'ds_vect', 'dubsack_vect', 'dude_vect', 'due_vect', 'dun_vect', 'dunno_vect', 'durban_vect', 'dvd_vect', 'earlier_vect', 'early_vect', 'earth_vect', 'easier_vect', 'easily_vect', 'east_vect', 'easter_vect', 'easy_vect', 'eat_vect', 'eaten_vect', 'eatin_vect', 'eating_vect', 'ebay_vect', 'ec2a_vect', 'ee_vect', 'eek_vect', 'eerie_vect', 'effects_vect', 'eg_vect', 'egg_vect', 'eggs_vect', 'eh_vect', 'eight_vect', 'either_vect', 'ela_vect', 'electricity_vect', 'else_vect', 'elsewhere_vect', 'em_vect', 'email_vect', 'embarassed_vect', 'empty_vect', 'end_vect', 'ended_vect', 'ending_vect', 'ends_vect', 'enemy_vect', 'energy_vect', 'eng_vect', 'engin_vect', 'england_vect', 'english_vect', 'enjoy_vect', 'enjoyed_vect', 'enough_vect', 'enter_vect', 'entered_vect', 'entitled_vect', 'entry_vect', 'enuff_vect', 'envelope_vect', 'er_vect', 'erm_vect', 'escape_vect', 'especially_vect', 'esplanade_vect', 'eta_vect', 'etc_vect', 'euro_vect', 'euro2004_vect', 'europe_vect', 'eve_vect', 'eveb_vect', 'even_vect', 'evening_vect', 'event_vect', 'ever_vect', 'every_vect', 'everybody_vect', 'everyday_vect', 'everyone_vect', 'everything_vect', 'everywhere_vect', 'evn_vect', 'evng_vect', 'ex_vect', 'exact_vect', 'exactly_vect', 'exam_vect', 'exams_vect', 'excellent_vect', 'except_vect', 'exciting_vect', 'excuse_vect', 'excuses_vect', 'executive_vect', 'exeter_vect', 'exhausted_vect', 'expect_vect', 'expect |
ing_vect', 'expensive_vect', 'experience_vect', 'expired_vect', 'expires_vect', 'explain_vect', 'explicit_vect', 'explosive_vect', 'express_vect', 'extra_vect', 'eye_vect', 'eyes_vect', 'fa_vect', 'fab_vect', 'face_vect', 'facebook_vect', 'fact_vect', 'faggy_vect', 'failed_vect', 'fair_vect', 'faith_vect', 'fall_vect', 'falls_vect', 'family_vect', 'fan_vect', 'fancies_vect', 'fancy_vect', 'fantasies_vect', 'fantastic_vect', 'fantasy_vect', 'far_vect', 'farm_vect', 'fast_vect', 'faster_vect', 'fat_vect', 'father_vect', 'fathima_vect', 'fault_vect', 'fave_vect', 'favorite_vect', 'favour_vect', 'favourite_vect', 'fb_vect', 'feb_vect', 'february_vect', 'feel_vect', 'feelin_vect', 'feeling_vect', 'feels_vect', 'fees_vect', 'feet_vect', 'fell_vect', 'felt_vect', 'fetch_vect', 'fever_vect', 'field_vect', 'fifteen_vect', 'fight_vect', 'fighting_vect', 'figure_vect', 'file_vect', 'files_vect', 'fill_vect', 'filling_vect', 'fills_vect', 'film_vect', 'final_vect', 'finally_vect', 'find_vect', 'fine_vect', 'fingers_vect', 'finish_vect', 'finished_vect', 'first_vect', 'fit_vect', 'fix_vect', 'fixed_vect', 'flag_vect', 'flaked_vect', 'flash_vect', 'flat_vect', 'flight_vect', 'flights_vect', 'flirt_vect', 'floor_vect', 'flower_vect', 'fml_vect', 'fo_vect', 'follow_vect', 'followed_vect', 'following_vect', 'fone_vect', 'food_vect', 'fool_vect', 'football_vect', 'force_vect', 'foreign_vect', 'forever_vect', 'forevr_vect', 'forget_vect', 'forgets_vect', 'forgiven_vect', 'forgot_vect', 'format_vect', 'forums_vect', 'forward_vect', 'forwarded_vect', 'found_vect', 'four_vect', 'fr_vect', 'fran_vect', 'freak_vect', 'free_vect', 'freefone_vect', 'freemsg_vect', 'freephone_vect', 'freezing_vect', 'fren_vect', 'frens_vect', 'fret_vect', 'fri_vect', 'friday_vect', 'friend_vect', 'friends_vect', 'friendship_vect', 'fringe_vect', 'frm_vect', 'frnd_vect', 'frnds_vect', 'frndship_vect', 'fuck_vect', 'fuckin_vect', 'fucking_vect', 'ful_vect', 'full_vect', 'fullonsmscom_vect', 'fun_vect', 'funny_vect', 'future_vect', 'fyi_vect', 'gal_vect', 'gals_vect', 'game_vect', 'games_vect', 'gang_vect', 'gap_vect', 'gaps_vect', 'garage_vect', 'garbage_vect', 'gary_vect', 'gas_vect', 'gautham_vect', 'gave_vect', 'gay_vect', 'gd_vect', 'ge_vect', 'gee_vect', 'geeee_vect', 'geeeee_vect', 'gender_vect', 'generally_vect', 'genius_vect', 'gentle_vect', 'gentleman_vect', 'gently_vect', 'germany_vect', 'get_vect', 'gets_vect', 'gettin_vect', 'getting_vect', 'gf_vect', 'gibbs_vect', 'gift_vect', 'gim_vect', 'girl_vect', 'girls_vect', 'gist_vect', 'giv_vect', 'give_vect', 'given_vect', 'gives_vect', 'giving_vect', 'glad_vect', 'gn_vect', 'go_vect', 'goal_vect', 'god_vect', 'goes_vect', 'goin_vect', 'going_vect', 'gold_vect', 'gon_vect', 'gona_vect', 'gone_vect', 'good_vect', 'goodmorning_vect', 'goodnight_vect', 'goodnite_vect', 'google_vect', 'gorgeous_vect', 'gossip_vect', 'got_vect', 'goto_vect', 'gotten_vect', 'govtinstituitions_vect', 'gr8_vect', 'grace_vect', 'gram_vect', 'grand_vect', 'granite_vect', 'gravity_vect', 'great_vect', 'green_vect', 'greet_vect', 'greetings_vect', 'grins_vect', 'grl_vect', 'ground_vect', 'group_vect', 'gt_vect', 'guaranteed_vect', 'gud_vect', 'gudnite_vect', 'guess_vect', 'guessing_vect', 'guide_vect', 'guilty_vect', 'guy_vect', 'guys_vect', 'gym_vect', 'ha_vect', 'haf_vect', 'haha_vect', 'hai_vect', 'hair_vect', 'haiz_vect', 'half_vect', 'halloween_vect', 'ham_vect', 'hand_vect', 'handed_vect', 'handle_vect', 'hands_vect', 'handset_vect', 'hanging_vect', 'happen_vect', 'happened_vect', 'happening_vect', 
'happens_vect', 'happiness_vect', 'happy_vect', 'hard_vect', 'hardcore_vect', 'hardly_vect', 'harry_vect', 'hate_vect', 'hav_vect', 'havent_vect', 'havenåõt_vect', 'havin_vect', 'head_vect', 'headache_vect', 'headin_vect', 'heads_vect', 'hear_vect', 'heard_vect', 'heart_vect', 'heavy_vect', 'hee_vect', 'height_vect', 'held_vect', 'helen_vect', 'hell_vect', 'hella_vect', 'hello_vect', 'help_vect', 'help08712400602450p_vect', 'helpline_vect', 'hence_vect', 'henry_vect', 'heri_vect', 'herlove_vect', 'hes_vect', 'hex_vect', 'hey_vect', 'hgsuite3422lands_vect', 'hi_vect', 'hide_vect', 'high_vect', 'hill_vect', 'hint_vect', 'hip_vect', 'history_vect', 'hit_vect', 'hiya_vect', 'hl_vect', 'hlp_vect', 'hmm_vect', 'hmmm_vect', 'hmv_vect', 'ho_vect', 'hockey_vect', 'hol_vect', 'hold_vect', 'holder_vect', 'holding_vect', 'holiday_vect', 'holla_vect', 'hols_vect', 'holy_vect', 'home_vect', 'honey_vect', 'hook_vect', 'hop_vect', 'hope_vect', 'hoped_vect', 'hopefully_vect', 'hoping_vect', 'horny_vect', 'horo_vect', 'horrible_vect', 'hospital_vect', 'hospitals_vect', 'hostel_vect', 'hot_vect', 'hotel_vect', 'hotels_vect', 'hour_vect', 'hours_vect', 'house_vect', 'housemaid_vect', 'however_vect', 'hows_vect', 'howz_vect', 'hr_vect', 'hrishi_vect', 'hrs_vect', 'http_vect', 'hubby_vect', 'hug_vect', 'huh_vect', 'hun_vect', 'hungry_vect', 'hunny_vect', 'hurry_vect', 'hurt_vect', 'hurting_vect', 'hurts_vect', 'husband_vect', 'hv_vect', 'hw_vect', 'hyde_vect', 'iam_vect', 'ibhltd_vect', 'ibiza_vect', 'ic_vect', 'ice_vect', 'id_vect', 'idea_vect', 'ideal_vect', 'ideas_vect', 'identifier_vect', 'idiot_vect', 'idk_vect', 'ignore_vect', 'ikea_vect', 'il_vect', 'ill_vect', 'im_vect', 'imagine_vect', 'imma_vect', 'immediately_vect', 'imp_vect', 'important_vect', 'impossible_vect', 'improved_vect', 'in2_vect', 'inc_vect', 'inches_vect', 'incident_vect', 'include_vect', 'including_vect', 'inclusive_vect', 'inconsiderate_vect', 'indeed_vect', 'india_vect', 'indian_vect', 'indians_vect', 'indicate_vect', 'infections_vect', 'infernal_vect', 'info_vect', 'inform_vect', 'information_vect', 'informed_vect', 'innings_vect', 'insha_vect', 'inside_vect', 'instantly_vect', 'instead_vect', 'instructions_vect', 'insurance_vect', 'intelligent_vect', 'interest_vect', 'interested_vect', 'interesting_vect', 'interflora_vect', 'internet_vect', 'intro_vect', 'invest_vect', 'invite_vect', 'invited_vect', 'inviting_vect', 'iouri_vect', 'ip4_vect', 'ipod_vect', 'irritating_vect', 'iscoming_vect', 'ish_vect', 'island_vect', 'islands_vect', 'isnt_vect', 'issue_vect', 'issues_vect', 'it_vect', 'italian_vect', 'its_vect', 'itz_vect', 'itåõs_vect', 'ive_vect', 'iz_vect', 'izzit_vect', 'iåõm_vect', 'jacket_vect', 'jamster_vect', 'jan_vect', 'january_vect', 'jason_vect', 'java_vect', 'jay_vect', 'jazz_vect', 'jealous_vect', 'jeans_vect', 'jen_vect', 'jenny_vect', 'jess_vect', 'jesus_vect', 'jiayin_vect', 'jiu_vect', 'jo_vect', 'joanna_vect', 'job_vect', 'jogging_vect', 'john_vect', 'join_vect', 'joined_vect', 'joining_vect', 'joke_vect', 'jokes_vect', 'jokin_vect', 'joking_vect', 'jolly_vect', 'joy_vect', 'jsco_vect', 'jst_vect', 'juan_vect', 'juicy_vect', 'july_vect', 'june_vect', 'jus_vect', 'juz_vect', 'k52_vect', 'kadeem_vect', 'kaiez_vect', 'kallis_vect', 'kano_vect', 'kappa_vect', 'karaoke_vect', 'kate_vect', 'kay_vect', 'kb_vect', 'ke_vect', 'keep_vect', 'keeping_vect', 'keeps_vect', 'kent_vect', 'kept_vect', 'kerala_vect', 'key_vect', 'keys_vect', 'ki_vect', 'kick_vect', 'kid_vect', 'kidding_vect', 'kids_vect', 'kidz_vect', 'kind_vect', 
'kinda_vect', 'kindly_vect', 'king_vect', 'kiss_vect', 'kisses_vect', 'kk_vect', 'knackered_vect', 'knew_vect', 'knock_vect', 'know_vect', 'knowing_vect', 'knows_vect', 'knw_vect', 'kz_vect', 'l8r_vect', 'la_vect', 'lab_vect', 'ladies_vect', 'lady_vect', 'lag_vect', 'laid_vect', 'land_vect', 'landline_vect', 'langport_vect', 'language_vect', 'laptop_vect', 'lar_vect', 'largest_vect', 'last_vect', 'late_vect', 'later_vect', 'latest_vect', 'latr_vect', 'laugh_vect', 'laughing_vect', 'law_vect', 'lazy_vect', 'ldn_vect', 'ldnw15h_vect', 'le_vect', 'lead_vect', 'learn_vect', 'least_vect', 'leave_vect', 'leaves_vect', 'leaving_vect', 'lect_vect', 'lecture_vect', 'left_vect', 'legal_vect', 'legs_vect', 'leh_vect', 'lei_vect', 'lem_vect', 'less_vect', 'lesson_vect', 'lessons_vect', 'let_vect', 'lets_vect', 'letter_vect', 'letters_vect', 'liao_vect', 'library_vect', 'lick_vect', 'licks_vect', 'lido_vect', 'lie_vect', 'lies_vect', 'life_vect', 'lifetime_vect', 'lift_vect', 'light_vect', 'lik_vect', 'like_vect', 'liked_vect', 'likely_vect', 'likes_vect', 'lil_vect', 'line_vect', 'linerental_vect', 'lines_vect', 'link_vect', 'lion_vect', 'lionm_vect', 'lionp_vect', 'lions_vect', 'lip_vect', 'list_vect', 'listen_vect', 'listening_vect', 'literally_vect', 'little_vect', 'live_vect', 'liverpool_vect', 'living_vect', 'lk_vect', 'll_vect', 'lmao_vect', ' |
lo_vect', 'loads_vect', 'loan_vect', 'loans_vect', 'local_vect', 'locations_vect', 'lock_vect', 'log_vect', 'login_vect', 'logo_vect', 'logopic_vect', 'lol_vect', 'london_vect', 'lonely_vect', 'long_vect', 'longer_vect', 'look_vect', 'lookatme_vect', 'looked_vect', 'lookin_vect', 'looking_vect', 'looks_vect', 'lor_vect', 'lose_vect', 'losing_vect', 'loss_vect', 'lost_vect', 'lot_vect', 'lotr_vect', 'lots_vect', 'lou_vect', 'loud_vect', 'lounge_vect', 'lousy_vect', 'lovable_vect', 'love_vect', 'loved_vect', 'lovely_vect', 'loveme_vect', 'lover_vect', 'loverboy_vect', 'lovers_vect', 'loves_vect', 'loving_vect', 'low_vect', 'lower_vect', 'loyal_vect', 'loyalty_vect', 'ls15hb_vect', 'lt_vect', 'ltd_vect', 'luck_vect', 'lucky_vect', 'lucy_vect', 'lunch_vect', 'luv_vect', 'lux_vect', 'luxury_vect', 'lyf_vect', 'lyfu_vect', 'lyk_vect', 'm227xy_vect', 'm26_vect', 'm263uz_vect', 'm8s_vect', 'mac_vect', 'machan_vect', 'macho_vect', 'mad_vect', 'madam_vect', 'made_vect', 'mag_vect', 'maga_vect', 'magical_vect', 'mah_vect', 'mail_vect', 'mailbox_vect', 'main_vect', 'maintain_vect', 'major_vect', 'make_vect', 'makes_vect', 'makin_vect', 'making_vect', 'malaria_vect', 'male_vect', 'mall_vect', 'man_vect', 'managed_vect', 'management_vect', 'many_vect', 'map_vect', 'march_vect', 'mark_vect', 'market_vect', 'marriage_vect', 'married_vect', 'marry_vect', 'masters_vect', 'match_vect', 'matches_vect', 'mate_vect', 'mates_vect', 'matrix3_vect', 'matter_vect', 'max10mins_vect', 'maximize_vect', 'maxå_vect', 'may_vect', 'mayb_vect', 'maybe_vect', 'mca_vect', 'mcat_vect', 'meal_vect', 'mean_vect', 'meaning_vect', 'means_vect', 'meant_vect', 'meanwhile_vect', 'med_vect', 'medical_vect', 'medicine_vect', 'meds_vect', 'meet_vect', 'meetin_vect', 'meeting_vect', 'meh_vect', 'mei_vect', 'member_vect', 'members_vect', 'men_vect', 'menu_vect', 'merry_vect', 'mess_vect', 'message_vect', 'messaged_vect', 'messages_vect', 'messaging_vect', 'met_vect', 'mid_vect', 'middle_vect', 'midnight_vect', 'mids_vect', 'might_vect', 'miles_vect', 'milk_vect', 'min_vect', 'mind_vect', 'mine_vect', 'mini_vect', 'minimum_vect', 'minor_vect', 'mins_vect', 'minute_vect', 'minutes_vect', 'minuts_vect', 'miracle_vect', 'mis_vect', 'miserable_vect', 'miss_vect', 'missed_vect', 'missin_vect', 'missing_vect', 'mistake_vect', 'mistakes_vect', 'mite_vect', 'mm_vect', 'mmm_vect', 'mmmm_vect', 'mmmmmm_vect', 'mnths_vect', 'mo_vect', 'moan_vect', 'mob_vect', 'mobile_vect', 'mobiles_vect', 'mobilesdirect_vect', 'mobilesvary_vect', 'mobileupd8_vect', 'mobno_vect', 'moby_vect', 'mode_vect', 'model_vect', 'module_vect', 'modules_vect', 'moji_vect', 'mojibiola_vect', 'mokka_vect', 'mom_vect', 'moment_vect', 'moments_vect', 'moms_vect', 'mon_vect', 'monday_vect', 'money_vect', 'monkeys_vect', 'mono_vect', 'month_vect', 'monthly_vect', 'months_vect', 'mood_vect', 'moon_vect', 'moral_vect', 'morn_vect', 'morning_vect', 'mostly_vect', 'mother_vect', 'motorola_vect', 'mouth_vect', 'move_vect', 'moved_vect', 'movie_vect', 'movies_vect', 'moving_vect', 'mp3_vect', 'mr_vect', 'mrng_vect', 'mrt_vect', 'msg_vect', 'msgs_vect', 'mths_vect', 'mu_vect', 'much_vect', 'mum_vect', 'mummy_vect', 'murder_vect', 'murdered_vect', 'murderer_vect', 'music_vect', 'must_vect', 'muz_vect', 'na_vect', 'nag_vect', 'nah_vect', 'naked_vect', 'name_vect', 'name1_vect', 'name2_vect', 'named_vect', 'names_vect', 'nan_vect', 'nap_vect', 'nasdaq_vect', 'nat_vect', 'national_vect', 'natural_vect', 'nature_vect', 'naughty_vect', 'nb_vect', 'nd_vect', 'ne_vect', 'near_vect', 'nearly_vect', 
'necessarily_vect', 'necklace_vect', 'ned_vect', 'need_vect', 'needed_vect', 'needs_vect', 'neither_vect', 'net_vect', 'netcollex_vect', 'network_vect', 'networks_vect', 'neva_vect', 'never_vect', 'new_vect', 'newest_vect', 'news_vect', 'next_vect', 'ni8_vect', 'nice_vect', 'nigeria_vect', 'night_vect', 'nights_vect', 'nimya_vect', 'nite_vect', 'no1_vect', 'nobody_vect', 'noe_vect', 'nokia_vect', 'nokias_vect', 'noline_vect', 'none_vect', 'noon_vect', 'nope_vect', 'norm_vect', 'norm150ptone_vect', 'normal_vect', 'normally_vect', 'northampton_vect', 'note_vect', 'nothin_vect', 'nothing_vect', 'notice_vect', 'noun_vect', 'nowadays_vect', 'nt_vect', 'ntt_vect', 'ntwk_vect', 'num_vect', 'number_vect', 'numbers_vect', 'nuther_vect', 'nvm_vect', 'nw_vect', 'nxt_vect', 'nyc_vect', 'nydc_vect', 'nyt_vect', 'o2_vect', 'obviously_vect', 'odi_vect', 'offer_vect', 'offers_vect', 'office_vect', 'official_vect', 'officially_vect', 'ofice_vect', 'often_vect', 'oh_vect', 'oi_vect', 'oic_vect', 'ok_vect', 'okay_vect', 'okey_vect', 'okie_vect', 'ola_vect', 'old_vect', 'omg_vect', 'omw_vect', 'one_vect', 'ones_vect', 'oni_vect', 'online_vect', 'onto_vect', 'onwards_vect', 'oooh_vect', 'oops_vect', 'open_vect', 'opening_vect', 'operator_vect', 'opinion_vect', 'opportunity_vect', 'opt_vect', 'option_vect', 'optout_vect', 'or2stoptxt_vect', 'orange_vect', 'oranges_vect', 'orchard_vect', 'order_vect', 'ordered_vect', 'oredi_vect', 'original_vect', 'oru_vect', 'os_vect', 'oso_vect', 'others_vect', 'otherwise_vect', 'otside_vect', 'outside_vect', 'outstanding_vect', 'outta_vect', 'ovulation_vect', 'oz_vect', 'pa_vect', 'pack_vect', 'package_vect', 'page_vect', 'pages_vect', 'paid_vect', 'pain_vect', 'painful_vect', 'painting_vect', 'panic_vect', 'paper_vect', 'papers_vect', 'paperwork_vect', 'parco_vect', 'parent_vect', 'parents_vect', 'paris_vect', 'park_vect', 'parked_vect', 'parking_vect', 'part_vect', 'partner_vect', 'partnership_vect', 'party_vect', 'pass_vect', 'passed_vect', 'password_vect', 'past_vect', 'pattern_vect', 'patty_vect', 'pay_vect', 'paying_vect', 'payment_vect', 'payoh_vect', 'pc_vect', 'peace_vect', 'pence_vect', 'people_vect', 'per_vect', 'perfect_vect', 'period_vect', 'person_vect', 'personal_vect', 'pete_vect', 'petey_vect', 'pg_vect', 'philosophy_vect', 'phoenix_vect', 'phone_vect', 'phones_vect', 'photo_vect', 'photos_vect', 'pic_vect', 'pick_vect', 'picked_vect', 'picking_vect', 'pics_vect', 'picsfree1_vect', 'picture_vect', 'pictures_vect', 'pie_vect', 'pieces_vect', 'pig_vect', 'pilates_vect', 'pin_vect', 'pink_vect', 'piss_vect', 'pissed_vect', 'pix_vect', 'pizza_vect', 'place_vect', 'places_vect', 'plan_vect', 'planned_vect', 'planning_vect', 'plans_vect', 'play_vect', 'played_vect', 'player_vect', 'players_vect', 'playing_vect', 'please_vect', 'pleased_vect', 'pleasure_vect', 'plenty_vect', 'pls_vect', 'plus_vect', 'plz_vect', 'pm_vect', 'po_vect', 'pobox_vect', 'pobox334_vect', 'pobox36504w45wq_vect', 'pobox45w2tg150p_vect', 'pobox84_vect', 'pod_vect', 'point_vect', 'points_vect', 'poker_vect', 'pole_vect', 'police_vect', 'politicians_vect', 'poly_vect', 'polyphonic_vect', 'polys_vect', 'pongal_vect', 'poor_vect', 'pop_vect', 'popped_vect', 'porn_vect', 'possession_vect', 'possible_vect', 'post_vect', 'postcard_vect', 'postcode_vect', 'posted_vect', 'posts_vect', 'potential_vect', 'potter_vect', 'pound_vect', 'pounds_vect', 'pouts_vect', 'power_vect', 'ppl_vect', 'pple_vect', 'ppm_vect', 'prabha_vect', 'practice_vect', 'practicing_vect', 'pray_vect', 'prefer_vect', 'premier_vect', 
'prepare_vect', 'prescription_vect', 'present_vect', 'press_vect', 'pretty_vect', 'prey_vect', 'price_vect', 'prince_vect', 'princess_vect', 'print_vect', 'privacy_vect', 'private_vect', 'prize_vect', 'prob_vect', 'probably_vect', 'problem_vect', 'problems_vect', 'process_vect', 'processed_vect', 'prof_vect', 'profit_vect', 'program_vect', 'project_vect', 'prolly_vect', 'promise_vect', 'promises_vect', 'promo_vect', 'proof_vect', 'properly_vect', 'prospects_vect', 'provided_vect', 'ps_vect', 'ptbo_vect', 'pub_vect', 'pull_vect', 'purchase_vect', 'purity_vect', 'purpose_vect', 'push_vect', 'pushes_vect', 'pussy_vect', 'put_vect', 'puttin_vect', 'putting_vect', 'qatar_vect', 'quality_vect', 'queen_vect', 'ques_vect', 'question_vect', 'questions_vect', 'quick_vect', 'quickly_vect', 'quiet_vect', 'quit_vect', 'quite_vect', 'quiz_vect', 'quote_vect', 'quoting_vect', 'racing_vect', 'radio_vect', 'railway_vect', 'rain_vect', 'raining_vect', 'raise_vect', 'rakhesh_vect', 'rally_vect', 'ran_vect', 'random_vect', 'randomly_vect', 'randy_vect', 'rang_vect', 'range_vect', 'ranjith_vect', 'rate_vect', 'rates_vect', 'rather_vect', 'rays_vect', 'rcvd_vect', 'rd_vect', 're_vect', 'reach_vect', 'reached_vect', 'reaching_vect', 'reaction_vect', 'read_vect', 'readers_vect', 'reading_vect', 'ready_vect', 'real_vect', 'realise_vect', 'reality_vect', 'realized_vect', 'really_vect', 'realy_vect', 'reason_vect', 'reasonable_vect', 'reasons_vect', 'reboot_vect', 'recd_vect', 'receipt_ |
vect', 'receive_vect', 'received_vect', 'receiving_vect', 'recent_vect', 'recently_vect', 'recession_vect', 'record_vect', 'records_vect', 'recovery_vect', 'red_vect', 'ref_vect', 'reference_vect', 'reg_vect', 'regards_vect', 'register_vect', 'registered_vect', 'regret_vect', 'regular_vect', 'relation_vect', 'relax_vect', 'released_vect', 'rem_vect', 'remain_vect', 'remains_vect', 'remember_vect', 'remembered_vect', 'remembr_vect', 'remind_vect', 'reminder_vect', 'remove_vect', 'rent_vect', 'rental_vect', 'rentl_vect', 'repair_vect', 'repeat_vect', 'replied_vect', 'reply_vect', 'replying_vect', 'report_vect', 'representative_vect', 'request_vect', 'requests_vect', 'research_vect', 'resend_vect', 'respect_vect', 'respond_vect', 'responding_vect', 'response_vect', 'responsibility_vect', 'rest_vect', 'restaurant_vect', 'result_vect', 'results_vect', 'retrieve_vect', 'return_vect', 'returned_vect', 'returns_vect', 'reveal_vect', 'revealed_vect', 'review_vect', 'revision_vect', 'reward_vect', 'rhythm_vect', 'rice_vect', 'rich_vect', 'ride_vect', 'right_vect', 'rights_vect', 'ring_vect', 'ringtone_vect', 'ringtones_vect', 'rite_vect', 'river_vect', 'road_vect', 'roads_vect', 'roast_vect', 'rock_vect', 'rocks_vect', 'rofl_vect', 'roger_vect', 'role_vect', 'ron_vect', 'room_vect', 'roommate_vect', 'roommates_vect', 'rooms_vect', 'rose_vect', 'round_vect', 'row_vect', 'roww1j6hl_vect', 'roww1jhl_vect', 'royal_vect', 'rply_vect', 'rs_vect', 'rstm_vect', 'ru_vect', 'rub_vect', 'rude_vect', 'rule_vect', 'run_vect', 'running_vect', 'runs_vect', 'rush_vect', 'sacrifice_vect', 'sad_vect', 'sae_vect', 'safe_vect', 'said_vect', 'salam_vect', 'salary_vect', 'sale_vect', 'salon_vect', 'sam_vect', 'santa_vect', 'sar_vect', 'sarasota_vect', 'sarcastic_vect', 'sary_vect', 'sat_vect', 'sathya_vect', 'saturday_vect', 'savamob_vect', 'save_vect', 'saved_vect', 'saw_vect', 'say_vect', 'saying_vect', 'says_vect', 'scared_vect', 'scary_vect', 'sch_vect', 'schedule_vect', 'school_vect', 'schools_vect', 'science_vect', 'scold_vect', 'score_vect', 'scores_vect', 'scotland_vect', 'scream_vect', 'screaming_vect', 'scrounge_vect', 'se_vect', 'sea_vect', 'search_vect', 'searching_vect', 'season_vect', 'seat_vect', 'sec_vect', 'second_vect', 'seconds_vect', 'secret_vect', 'secretary_vect', 'secs_vect', 'sed_vect', 'see_vect', 'seeing_vect', 'seem_vect', 'seemed_vect', 'seems_vect', 'seen_vect', 'selected_vect', 'selection_vect', 'self_vect', 'sell_vect', 'selling_vect', 'sells_vect', 'sem_vect', 'semester_vect', 'sen_vect', 'send_vect', 'sender_vect', 'sending_vect', 'sense_vect', 'sent_vect', 'sentence_vect', 'sept_vect', 'series_vect', 'serious_vect', 'seriously_vect', 'service_vect', 'services_vect', 'serving_vect', 'set_vect', 'setting_vect', 'settings_vect', 'settle_vect', 'settled_vect', 'seven_vect', 'several_vect', 'sex_vect', 'sexy_vect', 'sh_vect', 'sha_vect', 'shall_vect', 'share_vect', 'shd_vect', 'sheets_vect', 'shes_vect', 'shesil_vect', 'shining_vect', 'ship_vect', 'shipping_vect', 'shirt_vect', 'shirts_vect', 'shit_vect', 'shld_vect', 'shocking_vect', 'shoot_vect', 'shop_vect', 'shoppin_vect', 'shopping_vect', 'short_vect', 'shorter_vect', 'shortly_vect', 'shot_vect', 'shoving_vect', 'show_vect', 'shower_vect', 'showing_vect', 'shows_vect', 'shu_vect', 'shuhui_vect', 'shy_vect', 'si_vect', 'sick_vect', 'side_vect', 'sighs_vect', 'sight_vect', 'sign_vect', 'signing_vect', 'silence_vect', 'silent_vect', 'silver_vect', 'sim_vect', 'simple_vect', 'simply_vect', 'since_vect', 'sing_vect', 'singing_vect', 
'single_vect', 'singles_vect', 'sipix_vect', 'sir_vect', 'sis_vect', 'sister_vect', 'sit_vect', 'site_vect', 'sitting_vect', 'situation_vect', 'siva_vect', 'six_vect', 'size_vect', 'sk3_vect', 'sk38xh_vect', 'skilgme_vect', 'skip_vect', 'sky_vect', 'skype_vect', 'skyped_vect', 'slap_vect', 'slave_vect', 'sleep_vect', 'sleepin_vect', 'sleeping_vect', 'sleepy_vect', 'slept_vect', 'slice_vect', 'slide_vect', 'slightly_vect', 'slip_vect', 'slippers_vect', 'slo_vect', 'slots_vect', 'slow_vect', 'slowly_vect', 'small_vect', 'smashed_vect', 'smile_vect', 'smiles_vect', 'smiling_vect', 'smoke_vect', 'smoking_vect', 'sms_vect', 'smth_vect', 'sn_vect', 'snake_vect', 'snow_vect', 'social_vect', 'sofa_vect', 'soft_vect', 'software_vect', 'sol_vect', 'some1_vect', 'somebody_vect', 'someone_vect', 'somethin_vect', 'something_vect', 'sometimes_vect', 'somewhere_vect', 'song_vect', 'songs_vect', 'sony_vect', 'sonyericsson_vect', 'soon_vect', 'sooner_vect', 'sore_vect', 'sorry_vect', 'sort_vect', 'sorting_vect', 'sound_vect', 'sounds_vect', 'south_vect', 'sp_vect', 'space_vect', 'spanish_vect', 'speak_vect', 'speaking_vect', 'special_vect', 'specialcall_vect', 'specially_vect', 'speed_vect', 'spend_vect', 'spending_vect', 'spent_vect', 'spk_vect', 'spoke_vect', 'spoken_vect', 'spook_vect', 'sport_vect', 'sports_vect', 'spree_vect', 'spring_vect', 'sptv_vect', 'sry_vect', 'st_vect', 'staff_vect', 'stamps_vect', 'stand_vect', 'standard_vect', 'standing_vect', 'star_vect', 'staring_vect', 'start_vect', 'started_vect', 'starting_vect', 'starts_vect', 'starwars3_vect', 'statement_vect', 'station_vect', 'stay_vect', 'stayed_vect', 'staying_vect', 'std_vect', 'steam_vect', 'step_vect', 'steve_vect', 'stick_vect', 'sticky_vect', 'still_vect', 'stock_vect', 'stockport_vect', 'stomach_vect', 'stomps_vect', 'stones_vect', 'stop_vect', 'stopped_vect', 'stops_vect', 'store_vect', 'stores_vect', 'story_vect', 'str_vect', 'straight_vect', 'stranger_vect', 'street_vect', 'stress_vect', 'strike_vect', 'strong_vect', 'strongbuy_vect', 'stuck_vect', 'student_vect', 'study_vect', 'studying_vect', 'stuff_vect', 'stupid_vect', 'style_vect', 'stylish_vect', 'sub_vect', 'subpoly_vect', 'subs_vect', 'subscribe6gbpmnth_vect', 'subscribed_vect', 'subscriber_vect', 'subscription_vect', 'success_vect', 'successful_vect', 'successfully_vect', 'sucks_vect', 'sue_vect', 'sufficient_vect', 'suggest_vect', 'suite_vect', 'suits_vect', 'sum1_vect', 'summer_vect', 'sun_vect', 'sunday_vect', 'sunlight_vect', 'sunny_vect', 'sunshine_vect', 'suntec_vect', 'sup_vect', 'super_vect', 'superb_vect', 'superior_vect', 'supervisor_vect', 'supply_vect', 'support_vect', 'suppose_vect', 'supposed_vect', 'suprman_vect', 'sura_vect', 'sure_vect', 'surely_vect', 'surfing_vect', 'surprise_vect', 'surprised_vect', 'survey_vect', 'sux_vect', 'suzy_vect', 'sw7_vect', 'sw73ss_vect', 'sweet_vect', 'swing_vect', 'system_vect', 'ta_vect', 'tablets_vect', 'tahan_vect', 'take_vect', 'taken_vect', 'takes_vect', 'takin_vect', 'taking_vect', 'talent_vect', 'talk_vect', 'talking_vect', 'tampa_vect', 'tape_vect', 'tariffs_vect', 'tat_vect', 'taunton_vect', 'taylor_vect', 'tb_vect', 'tc_vect', 'tcrw1_vect', 'tcs_vect', 'tea_vect', 'teach_vect', 'teacher_vect', 'teaches_vect', 'team_vect', 'tear_vect', 'tease_vect', 'teasing_vect', 'tech_vect', 'technical_vect', 'tee_vect', 'teeth_vect', 'tel_vect', 'telephone_vect', 'tell_vect', 'telling_vect', 'tells_vect', 'telugu_vect', 'temple_vect', 'ten_vect', 'tenants_vect', 'tenerife_vect', 'tension_vect', 'term_vect', 'terms_vect', 
'terrible_vect', 'test_vect', 'testing_vect', 'text_vect', 'texted_vect', 'texting_vect', 'textoperator_vect', 'texts_vect', 'th_vect', 'thangam_vect', 'thank_vect', 'thanks_vect', 'thanksgiving_vect', 'thanx_vect', 'that_vect', 'thats_vect', 'thatåõs_vect', 'the_vect', 'theatre_vect', 'themob_vect', 'theory_vect', 'thesis_vect', 'thgt_vect', 'thing_vect', 'things_vect', 'think_vect', 'thinkin_vect', 'thinking_vect', 'thinks_vect', 'thk_vect', 'thnk_vect', 'tho_vect', 'though_vect', 'thought_vect', 'three_vect', 'throat_vect', 'throw_vect', 'thru_vect', 'tht_vect', 'thts_vect', 'thurs_vect', 'thursday_vect', 'tick_vect', 'ticket_vect', 'tickets_vect', 'tight_vect', 'tihs_vect', 'til_vect', 'till_vect', 'time_vect', 'times_vect', 'timing_vect', 'tired_vect', 'tirunelvali_vect', 'tirupur_vect', 'tissco_vect', 'tkts_vect', 'tm_vect', 'tming_vect', 'tmobile_vect', 'tmr_vect', 'tncs_vect', 'toa_vect', 'toclaim_vect', 'today_vect', 'todays_vect', 'tog_vect', 'together_vect', 'tok_vect', 'told_vect', 'tomarrow_vect', 'tomo_vect', 'tomorrow_vect', 'tone_vect', 'tones_vect', 'tones2youcouk_vect', 'tonight_vect', 'tonite_vect', 'took_vect', 'tool_vect', 'tooo_vect', 'toot_vect', 'top_vect', 'topic_vect', 'torch_vect', 'toshiba_vect', 'tot_vect', 'total_vect', 'totally_vect', 'touch_vect', 'tough_vect', 'tour_vect', 'towards_vect', 'town_vect', 'track_vect', 'trade_vect', 'traffic_vect', 'train_vect', 'training_vect', 'transaction_vect', 'transfer_vect', 'transport_vect', 'travel_vect', 'treat_vect', 'treated_vect', 'tried_vect', 'trip_vect', 'trips_vect', 'trouble_vect', 'true_vect', 'truffles_vect', 'truly_vect', 'trust_vect', 'truth_vect', 'try_ve |
ct', 'trying_vect', 'ts_vect', 'tscs_vect', 'tscs087147403231winawk_vect', 'tt_vect', 'ttyl_vect', 'tues_vect', 'tuesday_vect', 'tuition_vect', 'turn_vect', 'turning_vect', 'turns_vect', 'tv_vect', 'twelve_vect', 'twice_vect', 'two_vect', 'txt_vect', 'txtauction_vect', 'txtin_vect', 'txting_vect', 'txtno_vect', 'txts_vect', 'txtstop_vect', 'tyler_vect', 'type_vect', 'tyrone_vect', 'u4_vect', 'ubi_vect', 'ufind_vect', 'ugh_vect', 'uh_vect', 'uk_vect', 'uks_vect', 'ultimatum_vect', 'umma_vect', 'unable_vect', 'uncle_vect', 'understand_vect', 'understanding_vect', 'understood_vect', 'underwear_vect', 'unemployed_vect', 'uni_vect', 'unique_vect', 'university_vect', 'unless_vect', 'unlimited_vect', 'unnecessarily_vect', 'unredeemed_vect', 'unsold_vect', 'unsub_vect', 'unsubscribe_vect', 'upd8_vect', 'update_vect', 'updatenow_vect', 'upgrade_vect', 'upload_vect', 'upset_vect', 'upstairs_vect', 'ur_vect', 'ure_vect', 'urgent_vect', 'urgnt_vect', 'url_vect', 'urn_vect', 'urself_vect', 'us_vect', 'usb_vect', 'use_vect', 'used_vect', 'user_vect', 'usf_vect', 'using_vect', 'usual_vect', 'usually_vect', 'vale_vect', 'valentine_vect', 'valentines_vect', 'valid_vect', 'valid12hrs_vect', 'valuable_vect', 'value_vect', 'valued_vect', 'vary_vect', 've_vect', 'vegas_vect', 'verify_vect', 'version_vect', 'via_vect', 'vid_vect', 'video_vect', 'videochat_vect', 'videophones_vect', 'vijay_vect', 'vikky_vect', 'village_vect', 'violated_vect', 'violence_vect', 'vip_vect', 'virgin_vect', 'visit_vect', 'vivek_vect', 'vl_vect', 'voda_vect', 'vodafone_vect', 'vodka_vect', 'voice_vect', 'voicemail_vect', 'vomit_vect', 'vote_vect', 'voucher_vect', 'vouchers_vect', 'vry_vect', 'vth_vect', 'w45wq_vect', 'wa_vect', 'wah_vect', 'wait_vect', 'waited_vect', 'waitin_vect', 'waiting_vect', 'wake_vect', 'waking_vect', 'wales_vect', 'walk_vect', 'walked_vect', 'walking_vect', 'walmart_vect', 'wan_vect', 'wana_vect', 'want_vect', 'wanted_vect', 'wanting_vect', 'wants_vect', 'wap_vect', 'warm_vect', 'warner_vect', 'waste_vect', 'wasted_vect', 'wat_vect', 'watch_vect', 'watching_vect', 'water_vect', 'wats_vect', 'way_vect', 'wc1n3xx_vect', 'we_vect', 'weak_vect', 'wear_vect', 'wearing_vect', 'weather_vect', 'web_vect', 'website_vect', 'wed_vect', 'wedding_vect', 'wednesday_vect', 'wee_vect', 'weed_vect', 'week_vect', 'weekend_vect', 'weekends_vect', 'weekly_vect', 'weeks_vect', 'weigh_vect', 'weight_vect', 'weird_vect', 'welcome_vect', 'well_vect', 'welp_vect', 'wen_vect', 'went_vect', 'west_vect', 'wet_vect', 'what_vect', 'whatever_vect', 'whats_vect', 'whenever_vect', 'whenevr_vect', 'wherever_vect', 'whether_vect', 'white_vect', 'whn_vect', 'whole_vect', 'whos_vect', 'whose_vect', 'wid_vect', 'widelivecomindex_vect', 'wif_vect', 'wife_vect', 'wil_vect', 'willing_vect', 'win_vect', 'wind_vect', 'wine_vect', 'winner_vect', 'winning_vect', 'wins_vect', 'wipro_vect', 'wisdom_vect', 'wise_vect', 'wish_vect', 'wishes_vect', 'wishing_vect', 'wit_vect', 'within_vect', 'without_vect', 'wiv_vect', 'wk_vect', 'wkend_vect', 'wkg_vect', 'wkly_vect', 'wks_vect', 'wld_vect', 'wml_vect', 'wn_vect', 'wnt_vect', 'wo_vect', 'woke_vect', 'woken_vect', 'woman_vect', 'women_vect', 'wonder_vect', 'wonderful_vect', 'wondering_vect', 'wont_vect', 'woot_vect', 'word_vect', 'words_vect', 'work_vect', 'workin_vect', 'working_vect', 'works_vect', 'world_vect', 'worried_vect', 'worries_vect', 'worry_vect', 'worse_vect', 'worst_vect', 'worth_vect', 'wot_vect', 'would_vect', 'wow_vect', 'write_vect', 'wrong_vect', 'wtf_vect', 'wud_vect', 'wuld_vect', 
'wun_vect', 'www4tcbiz_vect', 'wwwcomuknet_vect', 'wwwetlpcoukexpressoffer_vect', 'wwwgetzedcouk_vect', 'wwwldewcom_vect', 'wwwldewcom1win150ppmx3age16_vect', 'wwwmovietriviatv_vect', 'wwwringtonescouk_vect', 'wwwsmsconet_vect', 'wwwtxttowincouk_vect', 'wwwurawinnercom_vect', 'wylie_vect', 'xchat_vect', 'xmas_vect', 'xuhui_vect', 'xx_vect', 'xxx_vect', 'xxxx_vect', 'xxxxx_vect', 'xy_vect', 'ya_vect', 'yahoo_vect', 'yan_vect', 'yar_vect', 'yay_vect', 'yck_vect', 'yeah_vect', 'year_vect', 'years_vect', 'yelling_vect', 'yellow_vect', 'yep_vect', 'yes_vect', 'yest_vect', 'yesterday_vect', 'yet_vect', 'yetunde_vect', 'yijue_vect', 'ym_vect', 'yo_vect', 'yoga_vect', 'yogasana_vect', 'yor_vect', 'you_vect', 'yr_vect', 'yrs_vect', 'yummy_vect', 'yun_vect', 'yuo_vect', 'yup_vect', 'zed_vect', 'zindgi_vect', 'ìï_vect', 'ûò_vect'], 'input_features_type': {'v2': 'O'}, 'word2num_features': [], 'unpreprocessed_columns': [], 'force_numeric_conv': [], 'conversion_method': 'TF_IDF'}, 'selector': {'reducer': False, 'reducer_file': '', 'input_features': ['v2'], 'output_features': ['07xxxxxxxxx_vect', '08700621170150p_vect', '08702840625comuk_vect', '08718726270150gbpmtmsg18_vect', '1000s_vect', '10am7pm_vect', '10k_vect', '10p_vect', '10pmin_vect', '10ppm_vect', '11mths_vect', '125gift_vect', '12hrs_vect', '12mths_vect', '150p_vect', '150perwksub_vect', '150pm_vect', '150pmin_vect', '150pmsg_vect', '150pmsgrcvdhgsuite3422landsroww1j6hl_vect', '150pmtmsgrcvd18_vect', '150ppm_vect', '150ptone_vect', '150pwk_vect', '150week_vect', '16only_vect', '18only_vect', '1hr_vect', '1minmobsmorelkpobox177hp51fl_vect', '1st_vect', '1x150pwk_vect', '20p_vect', '20pmin_vect', '21st_vect', '220cm2_vect', '24hrs_vect', '25p_vect', '26th_vect', '2day_vect', '2find_vect', '2geva_vect', '2go_vect', '2marrow_vect', '2mrw_vect', '2nd_vect', '2nite_vect', '2optout_vect', '2p_vect', '2u_vect', '2waxsto_vect', '2wks_vect', '300p_vect', '31pmsg_vect', '3510i_vect', '3d_vect', '3g_vect', '3gbp_vect', '3hrs_vect', '3mins_vect', '3qxj9_vect', '3rd_vect', '3ss_vect', '3u_vect', '3uz_vect', '3wk_vect', '40gb_vect', '4a_vect', '4d_vect', '4eva_vect', '4get_vect', '4info_vect', '4mths_vect', '4th_vect', '4u_vect', '50p_vect', '5min_vect', '5pm_vect', '5wb_vect', '5we_vect', '60pmin_vect', '6hrs_vect', '6months_vect', '6pm_vect', '7250i_vect', '7ish_vect', '8am_vect', '8pm_vect', '8th_vect', '8wp_vect', '9ae_vect', '9ja_vect', '9pm_vect', '9t_vect', 'aathi_vect', 'abi_vect', 'ability_vect', 'abiola_vect', 'able_vect', 'abt_vect', 'abta_vect', 'aburo_vect', 'ac_vect', 'academic_vect', 'acc_vect', 'accept_vect', 'access_vect', 'accident_vect', 'accidentally_vect', 'accordingly_vect', 'account_vect', 'ache_vect', 'across_vect', 'acted_vect', 'action_vect', 'activate_vect', 'activities_vect', 'actor_vect', 'actual_vect', 'actually_vect', 'ad_vect', 'adam_vect', 'add_vect', 'added_vect', 'addicted_vect', 'addie_vect', 'address_vect', 'admin_vect', 'administrator_vect', 'admirer_vect', 'admit_vect', 'adore_vect', 'adoring_vect', 'ads_vect', 'adult_vect', 'advance_vect', 'adventure_vect', 'advice_vect', 'advise_vect', 'affair_vect', 'affairs_vect', 'affectionate_vect', 'afraid_vect', 'aft_vect', 'afternoon_vect', 'aftr_vect', 'agalla_vect', 'age_vect', 'age16_vect', 'ages_vect', 'ago_vect', 'agree_vect', 'ah_vect', 'aha_vect', 'ahead_vect', 'ahmad_vect', 'ai_vect', 'aight_vect', 'aint_vect', 'air_vect', 'airport_vect', 'airtel_vect', 'aiya_vect', 'aiyah_vect', 'aiyar_vect', 'aiyo_vect', 'al_vect', 'album_vect', 'alert_vect', 'alex_vect', 
'alfie_vect', 'ali_vect', 'allah_vect', 'allow_vect', 'allowed_vect', 'almost_vect', 'alone_vect', 'along_vect', 'already_vect', 'alright_vect', 'alrite_vect', 'also_vect', 'always_vect', 'alwys_vect', 'amazing_vect', 'american_vect', 'among_vect', 'amount_vect', 'amp_vect', 'amt_vect', 'andros_vect', 'angry_vect', 'annie_vect', 'anniversary_vect', 'announcement_vect', 'anot_vect', 'another_vect', 'ans_vect', 'ansr_vect', 'answer_vect', 'answered_vect', 'answering_vect', 'answers_vect', 'anthony_vect', 'anti_vect', 'anybody_vect', 'anymore_vect', 'anyone_vect', 'anything_vect', 'anytime_vect', 'anyway_vect', 'anyways_vect', 'apartment_vect', 'app_vect', 'apparently_vect', 'applebees_vect', 'apply_vect', 'appointment_vect', 'appreciate_vect', 'appreciated_vect', 'approx_vect', 'apps_vect', 'appt_vect', 'april_vect', 'ar_vect', 'arcade_vect', 'ard_vect', 'area_vect', 'argh_vect', 'argument_vect', 'arm_vect', 'armand_vect', 'arms_vect', 'around_vect', 'arrange_vect', 'arrested_vect', 'arrive_vect', 'arsenal_vect', 'art_vect', 'arun_vect', 'asap_vect', 'ashley_vect', 'ask_vect', 'askd_ve |
ct', 'asked_vect', 'askin_vect', 'asking_vect', 'asks_vect', 'asleep_vect', 'ass_vect', 'assume_vect', 'ate_vect', 'atlanta_vect', 'atlast_vect', 'atm_vect', 'attached_vect', 'attempt_vect', 'attend_vect', 'auction_vect', 'august_vect', 'aunt_vect', 'aunty_vect', 'auto_vect', 'av_vect', 'available_vect', 'avatar_vect', 'ave_vect', 'avent_vect', 'avoid_vect', 'await_vect', 'awaiting_vect', 'awake_vect', 'award_vect', 'awarded_vect', 'away_vect', 'awesome_vect', 'aww_vect', 'b4_vect', 'ba_vect', 'babe_vect', 'babes_vect', 'babies_vect', 'baby_vect', 'back_vect', 'bad_vect', 'bag_vect', 'bags_vect', 'bahamas_vect', 'bak_vect', 'balance_vect', 'bank_vect', 'banks_vect', 'bar_vect', 'barely_vect', 'basic_vect', 'basically_vect', 'bat_vect', 'bath_vect', 'bathe_vect', 'bathing_vect', 'battery_vect', 'bay_vect', 'bb_vect', 'bc_vect', 'bck_vect', 'bcoz_vect', 'bday_vect', 'be_vect', 'bears_vect', 'beautiful_vect', 'beauty_vect', 'bec_vect', 'become_vect', 'becoz_vect', 'bed_vect', 'bedrm_vect', 'bedroom_vect', 'beer_vect', 'befor_vect', 'beg_vect', 'begin_vect', 'behave_vect', 'behind_vect', 'bein_vect', 'believe_vect', 'bell_vect', 'belly_vect', 'belovd_vect', 'best_vect', 'bet_vect', 'better_vect', 'beyond_vect', 'bf_vect', 'bid_vect', 'bids_vect', 'big_vect', 'bigger_vect', 'biggest_vect', 'bill_vect', 'billed_vect', 'billion_vect', 'bills_vect', 'bin_vect', 'biola_vect', 'birds_vect', 'birla_vect', 'birth_vect', 'birthdate_vect', 'birthday_vect', 'bishan_vect', 'bit_vect', 'bitch_vect', 'bite_vect', 'black_vect', 'blackberry_vect', 'blah_vect', 'blake_vect', 'blank_vect', 'bleh_vect', 'bless_vect', 'blessing_vect', 'bloo_vect', 'blood_vect', 'bloody_vect', 'blue_vect', 'bluetooth_vect', 'bluff_vect', 'boat_vect', 'body_vect', 'bold_vect', 'bone_vect', 'bonus_vect', 'boo_vect', 'book_vect', 'booked_vect', 'booking_vect', 'books_vect', 'boost_vect', 'booty_vect', 'bored_vect', 'boring_vect', 'born_vect', 'boss_vect', 'boston_vect', 'bother_vect', 'bottom_vect', 'bought_vect', 'bout_vect', 'bowl_vect', 'box_vect', 'box326_vect', 'box334sk38ch_vect', 'box97n7qp_vect', 'boy_vect', 'boye_vect', 'boyfriend_vect', 'boys_vect', 'boytoy_vect', 'brah_vect', 'brand_vect', 'bread_vect', 'break_vect', 'breathe_vect', 'bright_vect', 'brilliant_vect', 'bring_vect', 'bringing_vect', 'brings_vect', 'british_vect', 'bro_vect', 'broad_vect', 'broke_vect', 'broken_vect', 'bros_vect', 'brothas_vect', 'brother_vect', 'brought_vect', 'bruv_vect', 'bslvyl_vect', 'bt_vect', 'btnationalrate_vect', 'btw_vect', 'bucks_vect', 'bud_vect', 'budget_vect', 'buff_vect', 'buffet_vect', 'bugis_vect', 'building_vect', 'buns_vect', 'burger_vect', 'burns_vect', 'bus_vect', 'buses_vect', 'business_vect', 'busy_vect', 'butt_vect', 'buy_vect', 'buying_vect', 'buzz_vect', 'bx420_vect', 'bx420ip45we_vect', 'bye_vect', 'ca_vect', 'cabin_vect', 'cafe_vect', 'cake_vect', 'cal_vect', 'calculation_vect', 'calicut_vect', 'california_vect', 'call_vect', 'call2optout674_vect', 'callback_vect', 'callcost_vect', 'called_vect', 'caller_vect', 'callers_vect', 'callertune_vect', 'callin_vect', 'calling_vect', 'calls_vect', 'callså_vect', 'cam_vect', 'camcorder_vect', 'came_vect', 'camera_vect', 'cameravideo_vect', 'campus_vect', 'can_vect', 'canada_vect', 'canal_vect', 'canary_vect', 'cancel_vect', 'cancelled_vect', 'cancer_vect', 'cant_vect', 'captain_vect', 'car_vect', 'card_vect', 'cardiff_vect', 'care_vect', 'cared_vect', 'career_vect', 'careful_vect', 'carefully_vect', 'caring_vect', 'carlos_vect', 'caroline_vect', 'cars_vect', 'cartoon_vect', 
'case_vect', 'cash_vect', 'cashbalance_vect', 'cashin_vect', 'castor_vect', 'cat_vect', 'catch_vect', 'catching_vect', 'caught_vect', 'cause_vect', 'cbe_vect', 'cc_vect', 'cd_vect', 'cdgt_vect', 'cds_vect', 'celebrate_vect', 'celebration_vect', 'cell_vect', 'center_vect', 'centre_vect', 'certainly_vect', 'cha_vect', 'chain_vect', 'challenge_vect', 'chance_vect', 'change_vect', 'changed_vect', 'changes_vect', 'channel_vect', 'character_vect', 'charge_vect', 'charged_vect', 'charges_vect', 'charity_vect', 'charles_vect', 'chase_vect', 'chasing_vect', 'chat_vect', 'chatting_vect', 'cheap_vect', 'cheaper_vect', 'cheat_vect', 'chechi_vect', 'check_vect', 'checked_vect', 'checking_vect', 'cheers_vect', 'chennai_vect', 'cherish_vect', 'chest_vect', 'chicken_vect', 'chikku_vect', 'child_vect', 'childish_vect', 'children_vect', 'chill_vect', 'chillin_vect', 'china_vect', 'chinese_vect', 'chip_vect', 'chocolate_vect', 'choice_vect', 'choose_vect', 'chosen_vect', 'christ_vect', 'christmas_vect', 'church_vect', 'cine_vect', 'cinema_vect', 'citizen_vect', 'city_vect', 'claim_vect', 'claims_vect', 'claire_vect', 'class_vect', 'classes_vect', 'clean_vect', 'cleaning_vect', 'clear_vect', 'clearly_vect', 'click_vect', 'clock_vect', 'close_vect', 'closed_vect', 'closer_vect', 'closes_vect', 'clothes_vect', 'club_vect', 'cn_vect', 'co_vect', 'coast_vect', 'coat_vect', 'cochin_vect', 'code_vect', 'coffee_vect', 'coin_vect', 'coins_vect', 'cold_vect', 'colleagues_vect', 'collect_vect', 'collected_vect', 'collecting_vect', 'collection_vect', 'college_vect', 'colour_vect', 'come_vect', 'comedy_vect', 'comes_vect', 'comin_vect', 'coming_vect', 'commercial_vect', 'common_vect', 'community_vect', 'comp_vect', 'company_vect', 'competition_vect', 'complete_vect', 'completed_vect', 'completely_vect', 'complimentary_vect', 'computer_vect', 'concentrate_vect', 'concert_vect', 'conditions_vect', 'conducts_vect', 'confidence_vect', 'confirm_vect', 'congrats_vect', 'congratulations_vect', 'connection_vect', 'consider_vect', 'considering_vect', 'constant_vect', 'constantly_vect', 'contact_vect', 'contacted_vect', 'contacts_vect', 'content_vect', 'contents_vect', 'continue_vect', 'contract_vect', 'control_vect', 'convey_vect', 'convinced_vect', 'cool_vect', 'coping_vect', 'copy_vect', 'cornwall_vect', 'correct_vect', 'cos_vect', 'cost_vect', 'costa_vect', 'costs_vect', 'costå_vect', 'could_vect', 'count_vect', 'countin_vect', 'country_vect', 'couple_vect', 'course_vect', 'cover_vect', 'coz_vect', 'cr9_vect', 'cramps_vect', 'crave_vect', 'crazy_vect', 'created_vect', 'credit_vect', 'credited_vect', 'credits_vect', 'creepy_vect', 'crisis_vect', 'crore_vect', 'cross_vect', 'croydon_vect', 'cruise_vect', 'cry_vect', 'cs_vect', 'csbcm4235wc1n3xx_vect', 'csstop_vect', 'cud_vect', 'cuddling_vect', 'cum_vect', 'cup_vect', 'curious_vect', 'current_vect', 'currently_vect', 'cust_vect', 'custcare_vect', 'custcare08718720201_vect', 'custom_vect', 'customer_vect', 'customers_vect', 'cut_vect', 'cute_vect', 'cutting_vect', 'cuz_vect', 'cw25wx_vect', 'da_vect', 'dad_vect', 'daddy_vect', 'daily_vect', 'damn_vect', 'dance_vect', 'dancing_vect', 'dare_vect', 'dark_vect', 'darlin_vect', 'darling_vect', 'darren_vect', 'dat_vect', 'date_vect', 'dates_vect', 'dating_vect', 'dave_vect', 'day_vect', 'days_vect', 'de_vect', 'dead_vect', 'deal_vect', 'dealer_vect', 'dealing_vect', 'dear_vect', 'dearly_vect', 'death_vect', 'decide_vect', 'decided_vect', 'decimal_vect', 'decision_vect', 'deep_vect', 'def_vect', 'definite_vect', 'definitely_vect', 
'del_vect', 'delete_vect', 'deleted_vect', 'delhi_vect', 'deliver_vect', 'delivered_vect', 'deliveredtomorrow_vect', 'delivery_vect', 'dem_vect', 'demand_vect', 'den_vect', 'denis_vect', 'department_vect', 'depends_vect', 'depressed_vect', 'derek_vect', 'desires_vect', 'desperate_vect', 'details_vect', 'dey_vect', 'dhoni_vect', 'dial_vect', 'dick_vect', 'dictionary_vect', 'didn_vect', 'didnt_vect', 'didt_vect', 'die_vect', 'died_vect', 'diet_vect', 'different_vect', 'difficult_vect', 'digital_vect', 'dignity_vect', 'din_vect', 'dinner_vect', 'dint_vect', 'direct_vect', 'directly_vect', 'dirty_vect', 'dis_vect', 'discount_vect', 'discuss_vect', 'dislikes_vect', 'display_vect', 'distance_vect', 'distract_vect', 'disturb_vect', 'division_vect', 'dload_vect', 'dnt_vect', 'doc_vect', 'docs_vect', 'doctor_vect', 'doesnt_vect', 'dog_vect', 'dogging_vect', 'doggy_vect', 'doin_vect', 'dollars_vect', 'don_vect', 'done_vect', 'dont_vect', 'donåõt_vect', 'door_vect', 'dorm_vect', 'double_vect', 'dough_vect', 'download_vect', 'downloads_vect', 'draw_vect', 'dream_vect', 'dreams_vect', 'dress_vect', 'dressed_vect', 'dresser_vect', 'drink_vect', 'drinking_vect', 'drinks_vect', 'drive_vect', 'driver_vect', 'drivin_vect', 'driving_vect', 'drop_vect', 'dropped_vect', 'drug_vect', 'drugs_vect', 'drunk_vect', 'dry_vect', 'ds_vect', 'dubsack_vect', 'dude_vect', 'due_vect', 'dun_vect', 'dunno_vect', 'durban_vect', 'dvd_vect', 'earlier_vect', 'early_vect', 'earth_vect', 'easier_vect', 'easily_vect', 'east_vect', 'easter_vect', 'easy_vect', 'eat_vect', 'eaten_vect', 'eatin_vect', 'eating_vect', 'ebay_vect', ' |
ec2a_vect', 'ee_vect', 'eek_vect', 'eerie_vect', 'effects_vect', 'eg_vect', 'egg_vect', 'eggs_vect', 'eh_vect', 'eight_vect', 'either_vect', 'ela_vect', 'electricity_vect', 'else_vect', 'elsewhere_vect', 'em_vect', 'email_vect', 'embarassed_vect', 'empty_vect', 'end_vect', 'ended_vect', 'ending_vect', 'ends_vect', 'enemy_vect', 'energy_vect', 'eng_vect', 'engin_vect', 'england_vect', 'english_vect', 'enjoy_vect', 'enjoyed_vect', 'enough_vect', 'enter_vect', 'entered_vect', 'entitled_vect', 'entry_vect', 'enuff_vect', 'envelope_vect', 'er_vect', 'erm_vect', 'escape_vect', 'especially_vect', 'esplanade_vect', 'eta_vect', 'etc_vect', 'euro_vect', 'euro2004_vect', 'europe_vect', 'eve_vect', 'eveb_vect', 'even_vect', 'evening_vect', 'event_vect', 'ever_vect', 'every_vect', 'everybody_vect', 'everyday_vect', 'everyone_vect', 'everything_vect', 'everywhere_vect', 'evn_vect', 'evng_vect', 'ex_vect', 'exact_vect', 'exactly_vect', 'exam_vect', 'exams_vect', 'excellent_vect', 'except_vect', 'exciting_vect', 'excuse_vect', 'excuses_vect', 'executive_vect', 'exeter_vect', 'exhausted_vect', 'expect_vect', 'expecting_vect', 'expensive_vect', 'experience_vect', 'expired_vect', 'expires_vect', 'explain_vect', 'explicit_vect', 'explosive_vect', 'express_vect', 'extra_vect', 'eye_vect', 'eyes_vect', 'fa_vect', 'fab_vect', 'face_vect', 'facebook_vect', 'fact_vect', 'faggy_vect', 'failed_vect', 'fair_vect', 'faith_vect', 'fall_vect', 'falls_vect', 'family_vect', 'fan_vect', 'fancies_vect', 'fancy_vect', 'fantasies_vect', 'fantastic_vect', 'fantasy_vect', 'far_vect', 'farm_vect', 'fast_vect', 'faster_vect', 'fat_vect', 'father_vect', 'fathima_vect', 'fault_vect', 'fave_vect', 'favorite_vect', 'favour_vect', 'favourite_vect', 'fb_vect', 'feb_vect', 'february_vect', 'feel_vect', 'feelin_vect', 'feeling_vect', 'feels_vect', 'fees_vect', 'feet_vect', 'fell_vect', 'felt_vect', 'fetch_vect', 'fever_vect', 'field_vect', 'fifteen_vect', 'fight_vect', 'fighting_vect', 'figure_vect', 'file_vect', 'files_vect', 'fill_vect', 'filling_vect', 'fills_vect', 'film_vect', 'final_vect', 'finally_vect', 'find_vect', 'fine_vect', 'fingers_vect', 'finish_vect', 'finished_vect', 'first_vect', 'fit_vect', 'fix_vect', 'fixed_vect', 'flag_vect', 'flaked_vect', 'flash_vect', 'flat_vect', 'flight_vect', 'flights_vect', 'flirt_vect', 'floor_vect', 'flower_vect', 'fml_vect', 'fo_vect', 'follow_vect', 'followed_vect', 'following_vect', 'fone_vect', 'food_vect', 'fool_vect', 'football_vect', 'force_vect', 'foreign_vect', 'forever_vect', 'forevr_vect', 'forget_vect', 'forgets_vect', 'forgiven_vect', 'forgot_vect', 'format_vect', 'forums_vect', 'forward_vect', 'forwarded_vect', 'found_vect', 'four_vect', 'fr_vect', 'fran_vect', 'freak_vect', 'free_vect', 'freefone_vect', 'freemsg_vect', 'freephone_vect', 'freezing_vect', 'fren_vect', 'frens_vect', 'fret_vect', 'fri_vect', 'friday_vect', 'friend_vect', 'friends_vect', 'friendship_vect', 'fringe_vect', 'frm_vect', 'frnd_vect', 'frnds_vect', 'frndship_vect', 'fuck_vect', 'fuckin_vect', 'fucking_vect', 'ful_vect', 'full_vect', 'fullonsmscom_vect', 'fun_vect', 'funny_vect', 'future_vect', 'fyi_vect', 'gal_vect', 'gals_vect', 'game_vect', 'games_vect', 'gang_vect', 'gap_vect', 'gaps_vect', 'garage_vect', 'garbage_vect', 'gary_vect', 'gas_vect', 'gautham_vect', 'gave_vect', 'gay_vect', 'gd_vect', 'ge_vect', 'gee_vect', 'geeee_vect', 'geeeee_vect', 'gender_vect', 'generally_vect', 'genius_vect', 'gentle_vect', 'gentleman_vect', 'gently_vect', 'germany_vect', 'get_vect', 'gets_vect', 'gettin_vect', 
'getting_vect', 'gf_vect', 'gibbs_vect', 'gift_vect', 'gim_vect', 'girl_vect', 'girls_vect', 'gist_vect', 'giv_vect', 'give_vect', 'given_vect', 'gives_vect', 'giving_vect', 'glad_vect', 'gn_vect', 'go_vect', 'goal_vect', 'god_vect', 'goes_vect', 'goin_vect', 'going_vect', 'gold_vect', 'gon_vect', 'gona_vect', 'gone_vect', 'good_vect', 'goodmorning_vect', 'goodnight_vect', 'goodnite_vect', 'google_vect', 'gorgeous_vect', 'gossip_vect', 'got_vect', 'goto_vect', 'gotten_vect', 'govtinstituitions_vect', 'gr8_vect', 'grace_vect', 'gram_vect', 'grand_vect', 'granite_vect', 'gravity_vect', 'great_vect', 'green_vect', 'greet_vect', 'greetings_vect', 'grins_vect', 'grl_vect', 'ground_vect', 'group_vect', 'gt_vect', 'guaranteed_vect', 'gud_vect', 'gudnite_vect', 'guess_vect', 'guessing_vect', 'guide_vect', 'guilty_vect', 'guy_vect', 'guys_vect', 'gym_vect', 'ha_vect', 'haf_vect', 'haha_vect', 'hai_vect', 'hair_vect', 'haiz_vect', 'half_vect', 'halloween_vect', 'ham_vect', 'hand_vect', 'handed_vect', 'handle_vect', 'hands_vect', 'handset_vect', 'hanging_vect', 'happen_vect', 'happened_vect', 'happening_vect', 'happens_vect', 'happiness_vect', 'happy_vect', 'hard_vect', 'hardcore_vect', 'hardly_vect', 'harry_vect', 'hate_vect', 'hav_vect', 'havent_vect', 'havenåõt_vect', 'havin_vect', 'head_vect', 'headache_vect', 'headin_vect', 'heads_vect', 'hear_vect', 'heard_vect', 'heart_vect', 'heavy_vect', 'hee_vect', 'height_vect', 'held_vect', 'helen_vect', 'hell_vect', 'hella_vect', 'hello_vect', 'help_vect', 'help08712400602450p_vect', 'helpline_vect', 'hence_vect', 'henry_vect', 'heri_vect', 'herlove_vect', 'hes_vect', 'hex_vect', 'hey_vect', 'hgsuite3422lands_vect', 'hi_vect', 'hide_vect', 'high_vect', 'hill_vect', 'hint_vect', 'hip_vect', 'history_vect', 'hit_vect', 'hiya_vect', 'hl_vect', 'hlp_vect', 'hmm_vect', 'hmmm_vect', 'hmv_vect', 'ho_vect', 'hockey_vect', 'hol_vect', 'hold_vect', 'holder_vect', 'holding_vect', 'holiday_vect', 'holla_vect', 'hols_vect', 'holy_vect', 'home_vect', 'honey_vect', 'hook_vect', 'hop_vect', 'hope_vect', 'hoped_vect', 'hopefully_vect', 'hoping_vect', 'horny_vect', 'horo_vect', 'horrible_vect', 'hospital_vect', 'hospitals_vect', 'hostel_vect', 'hot_vect', 'hotel_vect', 'hotels_vect', 'hour_vect', 'hours_vect', 'house_vect', 'housemaid_vect', 'however_vect', 'hows_vect', 'howz_vect', 'hr_vect', 'hrishi_vect', 'hrs_vect', 'http_vect', 'hubby_vect', 'hug_vect', 'huh_vect', 'hun_vect', 'hungry_vect', 'hunny_vect', 'hurry_vect', 'hurt_vect', 'hurting_vect', 'hurts_vect', 'husband_vect', 'hv_vect', 'hw_vect', 'hyde_vect', 'iam_vect', 'ibhltd_vect', 'ibiza_vect', 'ic_vect', 'ice_vect', 'id_vect', 'idea_vect', 'ideal_vect', 'ideas_vect', 'identifier_vect', 'idiot_vect', 'idk_vect', 'ignore_vect', 'ikea_vect', 'il_vect', 'ill_vect', 'im_vect', 'imagine_vect', 'imma_vect', 'immediately_vect', 'imp_vect', 'important_vect', 'impossible_vect', 'improved_vect', 'in2_vect', 'inc_vect', 'inches_vect', 'incident_vect', 'include_vect', 'including_vect', 'inclusive_vect', 'inconsiderate_vect', 'indeed_vect', 'india_vect', 'indian_vect', 'indians_vect', 'indicate_vect', 'infections_vect', 'infernal_vect', 'info_vect', 'inform_vect', 'information_vect', 'informed_vect', 'innings_vect', 'insha_vect', 'inside_vect', 'instantly_vect', 'instead_vect', 'instructions_vect', 'insurance_vect', 'intelligent_vect', 'interest_vect', 'interested_vect', 'interesting_vect', 'interflora_vect', 'internet_vect', 'intro_vect', 'invest_vect', 'invite_vect', 'invited_vect', 'inviting_vect', 'iouri_vect', 
'ip4_vect', 'ipod_vect', 'irritating_vect', 'iscoming_vect', 'ish_vect', 'island_vect', 'islands_vect', 'isnt_vect', 'issue_vect', 'issues_vect', 'it_vect', 'italian_vect', 'its_vect', 'itz_vect', 'itåõs_vect', 'ive_vect', 'iz_vect', 'izzit_vect', 'iåõm_vect', 'jacket_vect', 'jamster_vect', 'jan_vect', 'january_vect', 'jason_vect', 'java_vect', 'jay_vect', 'jazz_vect', 'jealous_vect', 'jeans_vect', 'jen_vect', 'jenny_vect', 'jess_vect', 'jesus_vect', 'jiayin_vect', 'jiu_vect', 'jo_vect', 'joanna_vect', 'job_vect', 'jogging_vect', 'john_vect', 'join_vect', 'joined_vect', 'joining_vect', 'joke_vect', 'jokes_vect', 'jokin_vect', 'joking_vect', 'jolly_vect', 'joy_vect', 'jsco_vect', 'jst_vect', 'juan_vect', 'juicy_vect', 'july_vect', 'june_vect', 'jus_vect', 'juz_vect', 'k52_vect', 'kadeem_vect', 'kaiez_vect', 'kallis_vect', 'kano_vect', 'kappa_vect', 'karaoke_vect', 'kate_vect', 'kay_vect', 'kb_vect', 'ke_vect', 'keep_vect', 'keeping_vect', 'keeps_vect', 'kent_vect', 'kept_vect', 'kerala_vect', 'key_vect', 'keys_vect', 'ki_vect', 'kick_vect', 'kid_vect', 'kidding_vect', 'kids_vect', 'kidz_vect', 'kind_vect', 'kinda_vect', 'kindly_vect', 'king_vect', 'kiss_vect', 'kisses_vect', 'kk_vect', 'knackered_vect', 'knew_vect', 'knock_vect', 'know_vect', 'knowing_vect', 'knows_vect', 'knw_vect', 'kz_vect', 'l8r_vect', 'la_vect', 'lab_vect', 'ladies_vect', 'lady_vect', 'lag_ve |
ct', 'laid_vect', 'land_vect', 'landline_vect', 'langport_vect', 'language_vect', 'laptop_vect', 'lar_vect', 'largest_vect', 'last_vect', 'late_vect', 'later_vect', 'latest_vect', 'latr_vect', 'laugh_vect', 'laughing_vect', 'law_vect', 'lazy_vect', 'ldn_vect', 'ldnw15h_vect', 'le_vect', 'lead_vect', 'learn_vect', 'least_vect', 'leave_vect', 'leaves_vect', 'leaving_vect', 'lect_vect', 'lecture_vect', 'left_vect', 'legal_vect', 'legs_vect', 'leh_vect', 'lei_vect', 'lem_vect', 'less_vect', 'lesson_vect', 'lessons_vect', 'let_vect', 'lets_vect', 'letter_vect', 'letters_vect', 'liao_vect', 'library_vect', 'lick_vect', 'licks_vect', 'lido_vect', 'lie_vect', 'lies_vect', 'life_vect', 'lifetime_vect', 'lift_vect', 'light_vect', 'lik_vect', 'like_vect', 'liked_vect', 'likely_vect', 'likes_vect', 'lil_vect', 'line_vect', 'linerental_vect', 'lines_vect', 'link_vect', 'lion_vect', 'lionm_vect', 'lionp_vect', 'lions_vect', 'lip_vect', 'list_vect', 'listen_vect', 'listening_vect', 'literally_vect', 'little_vect', 'live_vect', 'liverpool_vect', 'living_vect', 'lk_vect', 'll_vect', 'lmao_vect', 'lo_vect', 'loads_vect', 'loan_vect', 'loans_vect', 'local_vect', 'locations_vect', 'lock_vect', 'log_vect', 'login_vect', 'logo_vect', 'logopic_vect', 'lol_vect', 'london_vect', 'lonely_vect', 'long_vect', 'longer_vect', 'look_vect', 'lookatme_vect', 'looked_vect', 'lookin_vect', 'looking_vect', 'looks_vect', 'lor_vect', 'lose_vect', 'losing_vect', 'loss_vect', 'lost_vect', 'lot_vect', 'lotr_vect', 'lots_vect', 'lou_vect', 'loud_vect', 'lounge_vect', 'lousy_vect', 'lovable_vect', 'love_vect', 'loved_vect', 'lovely_vect', 'loveme_vect', 'lover_vect', 'loverboy_vect', 'lovers_vect', 'loves_vect', 'loving_vect', 'low_vect', 'lower_vect', 'loyal_vect', 'loyalty_vect', 'ls15hb_vect', 'lt_vect', 'ltd_vect', 'luck_vect', 'lucky_vect', 'lucy_vect', 'lunch_vect', 'luv_vect', 'lux_vect', 'luxury_vect', 'lyf_vect', 'lyfu_vect', 'lyk_vect', 'm227xy_vect', 'm26_vect', 'm263uz_vect', 'm8s_vect', 'mac_vect', 'machan_vect', 'macho_vect', 'mad_vect', 'madam_vect', 'made_vect', 'mag_vect', 'maga_vect', 'magical_vect', 'mah_vect', 'mail_vect', 'mailbox_vect', 'main_vect', 'maintain_vect', 'major_vect', 'make_vect', 'makes_vect', 'makin_vect', 'making_vect', 'malaria_vect', 'male_vect', 'mall_vect', 'man_vect', 'managed_vect', 'management_vect', 'many_vect', 'map_vect', 'march_vect', 'mark_vect', 'market_vect', 'marriage_vect', 'married_vect', 'marry_vect', 'masters_vect', 'match_vect', 'matches_vect', 'mate_vect', 'mates_vect', 'matrix3_vect', 'matter_vect', 'max10mins_vect', 'maximize_vect', 'maxå_vect', 'may_vect', 'mayb_vect', 'maybe_vect', 'mca_vect', 'mcat_vect', 'meal_vect', 'mean_vect', 'meaning_vect', 'means_vect', 'meant_vect', 'meanwhile_vect', 'med_vect', 'medical_vect', 'medicine_vect', 'meds_vect', 'meet_vect', 'meetin_vect', 'meeting_vect', 'meh_vect', 'mei_vect', 'member_vect', 'members_vect', 'men_vect', 'menu_vect', 'merry_vect', 'mess_vect', 'message_vect', 'messaged_vect', 'messages_vect', 'messaging_vect', 'met_vect', 'mid_vect', 'middle_vect', 'midnight_vect', 'mids_vect', 'might_vect', 'miles_vect', 'milk_vect', 'min_vect', 'mind_vect', 'mine_vect', 'mini_vect', 'minimum_vect', 'minor_vect', 'mins_vect', 'minute_vect', 'minutes_vect', 'minuts_vect', 'miracle_vect', 'mis_vect', 'miserable_vect', 'miss_vect', 'missed_vect', 'missin_vect', 'missing_vect', 'mistake_vect', 'mistakes_vect', 'mite_vect', 'mm_vect', 'mmm_vect', 'mmmm_vect', 'mmmmmm_vect', 'mnths_vect', 'mo_vect', 'moan_vect', 'mob_vect', 'mobile_vect', 
'mobiles_vect', 'mobilesdirect_vect', 'mobilesvary_vect', 'mobileupd8_vect', 'mobno_vect', 'moby_vect', 'mode_vect', 'model_vect', 'module_vect', 'modules_vect', 'moji_vect', 'mojibiola_vect', 'mokka_vect', 'mom_vect', 'moment_vect', 'moments_vect', 'moms_vect', 'mon_vect', 'monday_vect', 'money_vect', 'monkeys_vect', 'mono_vect', 'month_vect', 'monthly_vect', 'months_vect', 'mood_vect', 'moon_vect', 'moral_vect', 'morn_vect', 'morning_vect', 'mostly_vect', 'mother_vect', 'motorola_vect', 'mouth_vect', 'move_vect', 'moved_vect', 'movie_vect', 'movies_vect', 'moving_vect', 'mp3_vect', 'mr_vect', 'mrng_vect', 'mrt_vect', 'msg_vect', 'msgs_vect', 'mths_vect', 'mu_vect', 'much_vect', 'mum_vect', 'mummy_vect', 'murder_vect', 'murdered_vect', 'murderer_vect', 'music_vect', 'must_vect', 'muz_vect', 'na_vect', 'nag_vect', 'nah_vect', 'naked_vect', 'name_vect', 'name1_vect', 'name2_vect', 'named_vect', 'names_vect', 'nan_vect', 'nap_vect', 'nasdaq_vect', 'nat_vect', 'national_vect', 'natural_vect', 'nature_vect', 'naughty_vect', 'nb_vect', 'nd_vect', 'ne_vect', 'near_vect', 'nearly_vect', 'necessarily_vect', 'necklace_vect', 'ned_vect', 'need_vect', 'needed_vect', 'needs_vect', 'neither_vect', 'net_vect', 'netcollex_vect', 'network_vect', 'networks_vect', 'neva_vect', 'never_vect', 'new_vect', 'newest_vect', 'news_vect', 'next_vect', 'ni8_vect', 'nice_vect', 'nigeria_vect', 'night_vect', 'nights_vect', 'nimya_vect', 'nite_vect', 'no1_vect', 'nobody_vect', 'noe_vect', 'nokia_vect', 'nokias_vect', 'noline_vect', 'none_vect', 'noon_vect', 'nope_vect', 'norm_vect', 'norm150ptone_vect', 'normal_vect', 'normally_vect', 'northampton_vect', 'note_vect', 'nothin_vect', 'nothing_vect', 'notice_vect', 'noun_vect', 'nowadays_vect', 'nt_vect', 'ntt_vect', 'ntwk_vect', 'num_vect', 'number_vect', 'numbers_vect', 'nuther_vect', 'nvm_vect', 'nw_vect', 'nxt_vect', 'nyc_vect', 'nydc_vect', 'nyt_vect', 'o2_vect', 'obviously_vect', 'odi_vect', 'offer_vect', 'offers_vect', 'office_vect', 'official_vect', 'officially_vect', 'ofice_vect', 'often_vect', 'oh_vect', 'oi_vect', 'oic_vect', 'ok_vect', 'okay_vect', 'okey_vect', 'okie_vect', 'ola_vect', 'old_vect', 'omg_vect', 'omw_vect', 'one_vect', 'ones_vect', 'oni_vect', 'online_vect', 'onto_vect', 'onwards_vect', 'oooh_vect', 'oops_vect', 'open_vect', 'opening_vect', 'operator_vect', 'opinion_vect', 'opportunity_vect', 'opt_vect', 'option_vect', 'optout_vect', 'or2stoptxt_vect', 'orange_vect', 'oranges_vect', 'orchard_vect', 'order_vect', 'ordered_vect', 'oredi_vect', 'original_vect', 'oru_vect', 'os_vect', 'oso_vect', 'others_vect', 'otherwise_vect', 'otside_vect', 'outside_vect', 'outstanding_vect', 'outta_vect', 'ovulation_vect', 'oz_vect', 'pa_vect', 'pack_vect', 'package_vect', 'page_vect', 'pages_vect', 'paid_vect', 'pain_vect', 'painful_vect', 'painting_vect', 'panic_vect', 'paper_vect', 'papers_vect', 'paperwork_vect', 'parco_vect', 'parent_vect', 'parents_vect', 'paris_vect', 'park_vect', 'parked_vect', 'parking_vect', 'part_vect', 'partner_vect', 'partnership_vect', 'party_vect', 'pass_vect', 'passed_vect', 'password_vect', 'past_vect', 'pattern_vect', 'patty_vect', 'pay_vect', 'paying_vect', 'payment_vect', 'payoh_vect', 'pc_vect', 'peace_vect', 'pence_vect', 'people_vect', 'per_vect', 'perfect_vect', 'period_vect', 'person_vect', 'personal_vect', 'pete_vect', 'petey_vect', 'pg_vect', 'philosophy_vect', 'phoenix_vect', 'phone_vect', 'phones_vect', 'photo_vect', 'photos_vect', 'pic_vect', 'pick_vect', 'picked_vect', 'picking_vect', 'pics_vect', 'picsfree1_vect', 
'picture_vect', 'pictures_vect', 'pie_vect', 'pieces_vect', 'pig_vect', 'pilates_vect', 'pin_vect', 'pink_vect', 'piss_vect', 'pissed_vect', 'pix_vect', 'pizza_vect', 'place_vect', 'places_vect', 'plan_vect', 'planned_vect', 'planning_vect', 'plans_vect', 'play_vect', 'played_vect', 'player_vect', 'players_vect', 'playing_vect', 'please_vect', 'pleased_vect', 'pleasure_vect', 'plenty_vect', 'pls_vect', 'plus_vect', 'plz_vect', 'pm_vect', 'po_vect', 'pobox_vect', 'pobox334_vect', 'pobox36504w45wq_vect', 'pobox45w2tg150p_vect', 'pobox84_vect', 'pod_vect', 'point_vect', 'points_vect', 'poker_vect', 'pole_vect', 'police_vect', 'politicians_vect', 'poly_vect', 'polyphonic_vect', 'polys_vect', 'pongal_vect', 'poor_vect', 'pop_vect', 'popped_vect', 'porn_vect', 'possession_vect', 'possible_vect', 'post_vect', 'postcard_vect', 'postcode_vect', 'posted_vect', 'posts_vect', 'potential_vect', 'potter_vect', 'pound_vect', 'pounds_vect', 'pouts_vect', 'power_vect', 'ppl_vect', 'pple_vect', 'ppm_vect', 'prabha_vect', 'practice_vect', 'practicing_vect', 'pray_vect', 'prefer_vect', 'premier_vect', 'prepare_vect', 'prescription_vect', 'present_vect', 'press_vect', 'pretty_vect', 'prey_vect', 'price_vect', 'prince_vect', 'princess_vect', 'print_vect', 'privacy_vect', 'private_vect', 'prize_vect', 'prob_vect', 'probably_vect', 'problem_vect', 'problems_vect', 'process_vect', 'processed_vect', 'prof_vect', 'profit_vect', 'program_vect', |
'project_vect', 'prolly_vect', 'promise_vect', 'promises_vect', 'promo_vect', 'proof_vect', 'properly_vect', 'prospects_vect', 'provided_vect', 'ps_vect', 'ptbo_vect', 'pub_vect', 'pull_vect', 'purchase_vect', 'purity_vect', 'purpose_vect', 'push_vect', 'pushes_vect', 'pussy_vect', 'put_vect', 'puttin_vect', 'putting_vect', 'qatar_vect', 'quality_vect', 'queen_vect', 'ques_vect', 'question_vect', 'questions_vect', 'quick_vect', 'quickly_vect', 'quiet_vect', 'quit_vect', 'quite_vect', 'quiz_vect', 'quote_vect', 'quoting_vect', 'racing_vect', 'radio_vect', 'railway_vect', 'rain_vect', 'raining_vect', 'raise_vect', 'rakhesh_vect', 'rally_vect', 'ran_vect', 'random_vect', 'randomly_vect', 'randy_vect', 'rang_vect', 'range_vect', 'ranjith_vect', 'rate_vect', 'rates_vect', 'rather_vect', 'rays_vect', 'rcvd_vect', 'rd_vect', 're_vect', 'reach_vect', 'reached_vect', 'reaching_vect', 'reaction_vect', 'read_vect', 'readers_vect', 'reading_vect', 'ready_vect', 'real_vect', 'realise_vect', 'reality_vect', 'realized_vect', 'really_vect', 'realy_vect', 'reason_vect', 'reasonable_vect', 'reasons_vect', 'reboot_vect', 'recd_vect', 'receipt_vect', 'receive_vect', 'received_vect', 'receiving_vect', 'recent_vect', 'recently_vect', 'recession_vect', 'record_vect', 'records_vect', 'recovery_vect', 'red_vect', 'ref_vect', 'reference_vect', 'reg_vect', 'regards_vect', 'register_vect', 'registered_vect', 'regret_vect', 'regular_vect', 'relation_vect', 'relax_vect', 'released_vect', 'rem_vect', 'remain_vect', 'remains_vect', 'remember_vect', 'remembered_vect', 'remembr_vect', 'remind_vect', 'reminder_vect', 'remove_vect', 'rent_vect', 'rental_vect', 'rentl_vect', 'repair_vect', 'repeat_vect', 'replied_vect', 'reply_vect', 'replying_vect', 'report_vect', 'representative_vect', 'request_vect', 'requests_vect', 'research_vect', 'resend_vect', 'respect_vect', 'respond_vect', 'responding_vect', 'response_vect', 'responsibility_vect', 'rest_vect', 'restaurant_vect', 'result_vect', 'results_vect', 'retrieve_vect', 'return_vect', 'returned_vect', 'returns_vect', 'reveal_vect', 'revealed_vect', 'review_vect', 'revision_vect', 'reward_vect', 'rhythm_vect', 'rice_vect', 'rich_vect', 'ride_vect', 'right_vect', 'rights_vect', 'ring_vect', 'ringtone_vect', 'ringtones_vect', 'rite_vect', 'river_vect', 'road_vect', 'roads_vect', 'roast_vect', 'rock_vect', 'rocks_vect', 'rofl_vect', 'roger_vect', 'role_vect', 'ron_vect', 'room_vect', 'roommate_vect', 'roommates_vect', 'rooms_vect', 'rose_vect', 'round_vect', 'row_vect', 'roww1j6hl_vect', 'roww1jhl_vect', 'royal_vect', 'rply_vect', 'rs_vect', 'rstm_vect', 'ru_vect', 'rub_vect', 'rude_vect', 'rule_vect', 'run_vect', 'running_vect', 'runs_vect', 'rush_vect', 'sacrifice_vect', 'sad_vect', 'sae_vect', 'safe_vect', 'said_vect', 'salam_vect', 'salary_vect', 'sale_vect', 'salon_vect', 'sam_vect', 'santa_vect', 'sar_vect', 'sarasota_vect', 'sarcastic_vect', 'sary_vect', 'sat_vect', 'sathya_vect', 'saturday_vect', 'savamob_vect', 'save_vect', 'saved_vect', 'saw_vect', 'say_vect', 'saying_vect', 'says_vect', 'scared_vect', 'scary_vect', 'sch_vect', 'schedule_vect', 'school_vect', 'schools_vect', 'science_vect', 'scold_vect', 'score_vect', 'scores_vect', 'scotland_vect', 'scream_vect', 'screaming_vect', 'scrounge_vect', 'se_vect', 'sea_vect', 'search_vect', 'searching_vect', 'season_vect', 'seat_vect', 'sec_vect', 'second_vect', 'seconds_vect', 'secret_vect', 'secretary_vect', 'secs_vect', 'sed_vect', 'see_vect', 'seeing_vect', 'seem_vect', 'seemed_vect', 'seems_vect', 'seen_vect', 
'selected_vect', 'selection_vect', 'self_vect', 'sell_vect', 'selling_vect', 'sells_vect', 'sem_vect', 'semester_vect', 'sen_vect', 'send_vect', 'sender_vect', 'sending_vect', 'sense_vect', 'sent_vect', 'sentence_vect', 'sept_vect', 'series_vect', 'serious_vect', 'seriously_vect', 'service_vect', 'services_vect', 'serving_vect', 'set_vect', 'setting_vect', 'settings_vect', 'settle_vect', 'settled_vect', 'seven_vect', 'several_vect', 'sex_vect', 'sexy_vect', 'sh_vect', 'sha_vect', 'shall_vect', 'share_vect', 'shd_vect', 'sheets_vect', 'shes_vect', 'shesil_vect', 'shining_vect', 'ship_vect', 'shipping_vect', 'shirt_vect', 'shirts_vect', 'shit_vect', 'shld_vect', 'shocking_vect', 'shoot_vect', 'shop_vect', 'shoppin_vect', 'shopping_vect', 'short_vect', 'shorter_vect', 'shortly_vect', 'shot_vect', 'shoving_vect', 'show_vect', 'shower_vect', 'showing_vect', 'shows_vect', 'shu_vect', 'shuhui_vect', 'shy_vect', 'si_vect', 'sick_vect', 'side_vect', 'sighs_vect', 'sight_vect', 'sign_vect', 'signing_vect', 'silence_vect', 'silent_vect', 'silver_vect', 'sim_vect', 'simple_vect', 'simply_vect', 'since_vect', 'sing_vect', 'singing_vect', 'single_vect', 'singles_vect', 'sipix_vect', 'sir_vect', 'sis_vect', 'sister_vect', 'sit_vect', 'site_vect', 'sitting_vect', 'situation_vect', 'siva_vect', 'six_vect', 'size_vect', 'sk3_vect', 'sk38xh_vect', 'skilgme_vect', 'skip_vect', 'sky_vect', 'skype_vect', 'skyped_vect', 'slap_vect', 'slave_vect', 'sleep_vect', 'sleepin_vect', 'sleeping_vect', 'sleepy_vect', 'slept_vect', 'slice_vect', 'slide_vect', 'slightly_vect', 'slip_vect', 'slippers_vect', 'slo_vect', 'slots_vect', 'slow_vect', 'slowly_vect', 'small_vect', 'smashed_vect', 'smile_vect', 'smiles_vect', 'smiling_vect', 'smoke_vect', 'smoking_vect', 'sms_vect', 'smth_vect', 'sn_vect', 'snake_vect', 'snow_vect', 'social_vect', 'sofa_vect', 'soft_vect', 'software_vect', 'sol_vect', 'some1_vect', 'somebody_vect', 'someone_vect', 'somethin_vect', 'something_vect', 'sometimes_vect', 'somewhere_vect', 'song_vect', 'songs_vect', 'sony_vect', 'sonyericsson_vect', 'soon_vect', 'sooner_vect', 'sore_vect', 'sorry_vect', 'sort_vect', 'sorting_vect', 'sound_vect', 'sounds_vect', 'south_vect', 'sp_vect', 'space_vect', 'spanish_vect', 'speak_vect', 'speaking_vect', 'special_vect', 'specialcall_vect', 'specially_vect', 'speed_vect', 'spend_vect', 'spending_vect', 'spent_vect', 'spk_vect', 'spoke_vect', 'spoken_vect', 'spook_vect', 'sport_vect', 'sports_vect', 'spree_vect', 'spring_vect', 'sptv_vect', 'sry_vect', 'st_vect', 'staff_vect', 'stamps_vect', 'stand_vect', 'standard_vect', 'standing_vect', 'star_vect', 'staring_vect', 'start_vect', 'started_vect', 'starting_vect', 'starts_vect', 'starwars3_vect', 'statement_vect', 'station_vect', 'stay_vect', 'stayed_vect', 'staying_vect', 'std_vect', 'steam_vect', 'step_vect', 'steve_vect', 'stick_vect', 'sticky_vect', 'still_vect', 'stock_vect', 'stockport_vect', 'stomach_vect', 'stomps_vect', 'stones_vect', 'stop_vect', 'stopped_vect', 'stops_vect', 'store_vect', 'stores_vect', 'story_vect', 'str_vect', 'straight_vect', 'stranger_vect', 'street_vect', 'stress_vect', 'strike_vect', 'strong_vect', 'strongbuy_vect', 'stuck_vect', 'student_vect', 'study_vect', 'studying_vect', 'stuff_vect', 'stupid_vect', 'style_vect', 'stylish_vect', 'sub_vect', 'subpoly_vect', 'subs_vect', 'subscribe6gbpmnth_vect', 'subscribed_vect', 'subscriber_vect', 'subscription_vect', 'success_vect', 'successful_vect', 'successfully_vect', 'sucks_vect', 'sue_vect', 'sufficient_vect', 'suggest_vect', 
'suite_vect', 'suits_vect', 'sum1_vect', 'summer_vect', 'sun_vect', 'sunday_vect', 'sunlight_vect', 'sunny_vect', 'sunshine_vect', 'suntec_vect', 'sup_vect', 'super_vect', 'superb_vect', 'superior_vect', 'supervisor_vect', 'supply_vect', 'support_vect', 'suppose_vect', 'supposed_vect', 'suprman_vect', 'sura_vect', 'sure_vect', 'surely_vect', 'surfing_vect', 'surprise_vect', 'surprised_vect', 'survey_vect', 'sux_vect', 'suzy_vect', 'sw7_vect', 'sw73ss_vect', 'sweet_vect', 'swing_vect', 'system_vect', 'ta_vect', 'tablets_vect', 'tahan_vect', 'take_vect', 'taken_vect', 'takes_vect', 'takin_vect', 'taking_vect', 'talent_vect', 'talk_vect', 'talking_vect', 'tampa_vect', 'tape_vect', 'tariffs_vect', 'tat_vect', 'taunton_vect', 'taylor_vect', 'tb_vect', 'tc_vect', 'tcrw1_vect', 'tcs_vect', 'tea_vect', 'teach_vect', 'teacher_vect', 'teaches_vect', 'team_vect', 'tear_vect', 'tease_vect', 'teasing_vect', 'tech_vect', 'technical_vect', 'tee_vect', 'teeth_vect', 'tel_vect', 'telephone_vect', 'tell_vect', 'telling_vect', 'tells_vect', 'telugu_vect', 'temple_vect', 'ten_vect', 'tenants_vect', 'tenerife_vect', 'tension_vect', 'term_vect', 'terms_vect', 'terrible_vect', 'test_vect', 'testing_vect', 'text_vect', 'texted_vect', 'texting_vect', 'textoperator_vect', 'texts_vect', 'th_vect', 'thangam_vect', 'thank_vect', 'thanks_vect', 'thanksgiving_vect', 'thanx_vect', 'that_vect', 'thats_vect', 'thatåõs_vect', 'the_vect', 'theatre_vect', 'themob_vect', 'theory_vect', 'thesis_vect', 'thgt_vect', 'thing_vect', 'things_vect', 'think_vect', 'thinkin_vect', 'thinking_vect', 'thinks_vect', 'thk_vect', 'thnk_vect', 'tho_vect', 'though_vect', 'thought_vect', 'three_vect', 'throat_vect', 'throw_vect', 'thru_vect', 'tht_vect', 'thts_ |
vect', 'thurs_vect', 'thursday_vect', 'tick_vect', 'ticket_vect', 'tickets_vect', 'tight_vect', 'tihs_vect', 'til_vect', 'till_vect', 'time_vect', 'times_vect', 'timing_vect', 'tired_vect', 'tirunelvali_vect', 'tirupur_vect', 'tissco_vect', 'tkts_vect', 'tm_vect', 'tming_vect', 'tmobile_vect', 'tmr_vect', 'tncs_vect', 'toa_vect', 'toclaim_vect', 'today_vect', 'todays_vect', 'tog_vect', 'together_vect', 'tok_vect', 'told_vect', 'tomarrow_vect', 'tomo_vect', 'tomorrow_vect', 'tone_vect', 'tones_vect', 'tones2youcouk_vect', 'tonight_vect', 'tonite_vect', 'took_vect', 'tool_vect', 'tooo_vect', 'toot_vect', 'top_vect', 'topic_vect', 'torch_vect', 'toshiba_vect', 'tot_vect', 'total_vect', 'totally_vect', 'touch_vect', 'tough_vect', 'tour_vect', 'towards_vect', 'town_vect', 'track_vect', 'trade_vect', 'traffic_vect', 'train_vect', 'training_vect', 'transaction_vect', 'transfer_vect', 'transport_vect', 'travel_vect', 'treat_vect', 'treated_vect', 'tried_vect', 'trip_vect', 'trips_vect', 'trouble_vect', 'true_vect', 'truffles_vect', 'truly_vect', 'trust_vect', 'truth_vect', 'try_vect', 'trying_vect', 'ts_vect', 'tscs_vect', 'tscs087147403231winawk_vect', 'tt_vect', 'ttyl_vect', 'tues_vect', 'tuesday_vect', 'tuition_vect', 'turn_vect', 'turning_vect', 'turns_vect', 'tv_vect', 'twelve_vect', 'twice_vect', 'two_vect', 'txt_vect', 'txtauction_vect', 'txtin_vect', 'txting_vect', 'txtno_vect', 'txts_vect', 'txtstop_vect', 'tyler_vect', 'type_vect', 'tyrone_vect', 'u4_vect', 'ubi_vect', 'ufind_vect', 'ugh_vect', 'uh_vect', 'uk_vect', 'uks_vect', 'ultimatum_vect', 'umma_vect', 'unable_vect', 'uncle_vect', 'understand_vect', 'understanding_vect', 'understood_vect', 'underwear_vect', 'unemployed_vect', 'uni_vect', 'unique_vect', 'university_vect', 'unless_vect', 'unlimited_vect', 'unnecessarily_vect', 'unredeemed_vect', 'unsold_vect', 'unsub_vect', 'unsubscribe_vect', 'upd8_vect', 'update_vect', 'updatenow_vect', 'upgrade_vect', 'upload_vect', 'upset_vect', 'upstairs_vect', 'ur_vect', 'ure_vect', 'urgent_vect', 'urgnt_vect', 'url_vect', 'urn_vect', 'urself_vect', 'us_vect', 'usb_vect', 'use_vect', 'used_vect', 'user_vect', 'usf_vect', 'using_vect', 'usual_vect', 'usually_vect', 'vale_vect', 'valentine_vect', 'valentines_vect', 'valid_vect', 'valid12hrs_vect', 'valuable_vect', 'value_vect', 'valued_vect', 'vary_vect', 've_vect', 'vegas_vect', 'verify_vect', 'version_vect', 'via_vect', 'vid_vect', 'video_vect', 'videochat_vect', 'videophones_vect', 'vijay_vect', 'vikky_vect', 'village_vect', 'violated_vect', 'violence_vect', 'vip_vect', 'virgin_vect', 'visit_vect', 'vivek_vect', 'vl_vect', 'voda_vect', 'vodafone_vect', 'vodka_vect', 'voice_vect', 'voicemail_vect', 'vomit_vect', 'vote_vect', 'voucher_vect', 'vouchers_vect', 'vry_vect', 'vth_vect', 'w45wq_vect', 'wa_vect', 'wah_vect', 'wait_vect', 'waited_vect', 'waitin_vect', 'waiting_vect', 'wake_vect', 'waking_vect', 'wales_vect', 'walk_vect', 'walked_vect', 'walking_vect', 'walmart_vect', 'wan_vect', 'wana_vect', 'want_vect', 'wanted_vect', 'wanting_vect', 'wants_vect', 'wap_vect', 'warm_vect', 'warner_vect', 'waste_vect', 'wasted_vect', 'wat_vect', 'watch_vect', 'watching_vect', 'water_vect', 'wats_vect', 'way_vect', 'wc1n3xx_vect', 'we_vect', 'weak_vect', 'wear_vect', 'wearing_vect', 'weather_vect', 'web_vect', 'website_vect', 'wed_vect', 'wedding_vect', 'wednesday_vect', 'wee_vect', 'weed_vect', 'week_vect', 'weekend_vect', 'weekends_vect', 'weekly_vect', 'weeks_vect', 'weigh_vect', 'weight_vect', 'weird_vect', 'welcome_vect', 'well_vect', 'welp_vect', 
'wen_vect', 'went_vect', 'west_vect', 'wet_vect', 'what_vect', 'whatever_vect', 'whats_vect', 'whenever_vect', 'whenevr_vect', 'wherever_vect', 'whether_vect', 'white_vect', 'whn_vect', 'whole_vect', 'whos_vect', 'whose_vect', 'wid_vect', 'widelivecomindex_vect', 'wif_vect', 'wife_vect', 'wil_vect', 'willing_vect', 'win_vect', 'wind_vect', 'wine_vect', 'winner_vect', 'winning_vect', 'wins_vect', 'wipro_vect', 'wisdom_vect', 'wise_vect', 'wish_vect', 'wishes_vect', 'wishing_vect', 'wit_vect', 'within_vect', 'without_vect', 'wiv_vect', 'wk_vect', 'wkend_vect', 'wkg_vect', 'wkly_vect', 'wks_vect', 'wld_vect', 'wml_vect', 'wn_vect', 'wnt_vect', 'wo_vect', 'woke_vect', 'woken_vect', 'woman_vect', 'women_vect', 'wonder_vect', 'wonderful_vect', 'wondering_vect', 'wont_vect', 'woot_vect', 'word_vect', 'words_vect', 'work_vect', 'workin_vect', 'working_vect', 'works_vect', 'world_vect', 'worried_vect', 'worries_vect', 'worry_vect', 'worse_vect', 'worst_vect', 'worth_vect', 'wot_vect', 'would_vect', 'wow_vect', 'write_vect', 'wrong_vect', 'wtf_vect', 'wud_vect', 'wuld_vect', 'wun_vect', 'www4tcbiz_vect', 'wwwcomuknet_vect', 'wwwetlpcoukexpressoffer_vect', 'wwwgetzedcouk_vect', 'wwwldewcom_vect', 'wwwldewcom1win150ppmx3age16_vect', 'wwwmovietriviatv_vect', 'wwwringtonescouk_vect', 'wwwsmsconet_vect', 'wwwtxttowincouk_vect', 'wwwurawinnercom_vect', 'wylie_vect', 'xchat_vect', 'xmas_vect', 'xuhui_vect', 'xx_vect', 'xxx_vect', 'xxxx_vect', 'xxxxx_vect', 'xy_vect', 'ya_vect', 'yahoo_vect', 'yan_vect', 'yar_vect', 'yay_vect', 'yck_vect', 'yeah_vect', 'year_vect', 'years_vect', 'yelling_vect', 'yellow_vect', 'yep_vect', 'yes_vect', 'yest_vect', 'yesterday_vect', 'yet_vect', 'yetunde_vect', 'yijue_vect', 'ym_vect', 'yo_vect', 'yoga_vect', 'yogasana_vect', 'yor_vect', 'you_vect', 'yr_vect', 'yrs_vect', 'yummy_vect', 'yun_vect', 'yuo_vect', 'yup_vect', 'zed_vect', 'zindgi_vect', 'ìï_vect', 'ûò_vect']}, 'training': {'algo': 'Logistic Regression', 'model_file': 'AI0110_1.sav'}}
deployer = get_deployer('classification',params=config)
deployer.run( )
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import shutil
import subprocess
from os.path import expanduser
import platform
deploymentfolder = os.path.join(os.path.dirname(os.path.abspath(__file__)),'HCLT','AION','target')
modelname='AION_12'
version='1'
def createDockerImage(deploymentfolder,modelname,version,learner_type,textdata):
modelPath = os.path.join(deploymentfolder)
filename = os.path.join(deploymentfolder,'docker_image')
modelservice = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','run_modelService.py')
shellscript = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','start_modelservice.sh')
aix = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','AIX-0.1-py3-none-any.whl')
drift = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','Drift-0.1-py3-none-any.whl')
sitepackage = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','site-packages')
model_dockerSetup = os.path.join(os.path.dirname(os.path.abspath(__file__)),'dockersetup','docker_'+modelname + '_' + version)
docker_setup = os.path.join(model_dockerSetup,modelname + '_' + version)
model_sitepackage = os.path.join(model_dockerSetup,'site-packages')
model_dockerSetupservicefile = os.path.join(model_dockerSetup,'run_modelService.py')
model_dockershellscript = os.path.join(model_dockerSetup,'start_modelservice.sh')
model_aix = os.path.join(model_dockerSetup,'AIX-0.1-py3-none-any.whl')
model_drift = os.path.join(model_dockerSetup,'Drift-0.1-py3-none-any.whl')
try:
os.mkdir(model_dockerSetup)
except Exception as e:
print("Error in creating Setup directpry "+str(e))
pass
shutil.copytree(modelPath, docker_setup)
if textdata:
shutil.copytree(sitepackage, model_sitepackage)
modelpretrainpath=os.path.join(model_dockerSetup,'HCLT','AION','PreTrainedModels','TextProcessing')
'''
try:
os.makedirs(modelpretrainpath, exist_ok=True)
except Exception as e:
print("Error in creating Setup directpry "+str(e))
pass
'''
home = expanduser("~")
if platform.system() == 'Windows':
hostpretrainpath = os.path.join(home,'AppData','Local','HCLT','AION','PreTrainedModels','TextProcessing')
else:
hostpretrainpath = os.path.join(home,'HCLT','AION','PreTrainedModels','TextProcessing')
shutil.copytree(hostpretrainpath, modelpretrainpath)
shutil.copyfile(modelservice, model_dockerSetupservicefile)
shutil.copyfile(shellscript, model_dockershellscript)
shutil.copyfile(aix, model_aix)
shutil.copyfile(drift,model_drift)
try:
os.mkdir(filename)
except:
pass
requirementfilename = os.path.join(model_dockerSetup,'requirements.txt')
installfilename = os.path.join(model_dockerSetup,'install.py')
dockerfile = os.path.join(model_dockerSetup,'Dockerfile')
dockerdata='FROM python:3.8-slim-buster'
dockerdata+='\\n'
if textdata:
dockerdata+='WORKDIR /root'
dockerdata+='\\n'
dockerdata+='COPY HCLT HCLT'
dockerdata+='\\n'
dockerdata+='WORKDIR /app'
dockerdata+='\\n'
dockerdata+='COPY requirements.txt requirements.txt'
dockerdata+='\\n'
dockerdata+='COPY '+modelname+'_'+version+' '+modelname+'_'+version
dockerdata+='\\n'
if textdata:
dockerdata+='COPY site-packages site-packages'
dockerdata+='\\n'
dockerdata+='COPY install.py install.py'
dockerdata+='\\n'
dockerdata+='COPY run_modelService.py run_modelService.py'
dockerdata+='\\n'
dockerdata+='COPY AIX-0.1-py3-none-any.whl AIX-0.1-py3-none-any.whl'
dockerdata+='\\n'
dockerdata+='COPY Drift-0.1-py3-none-any.whl Drift-0.1-py3-none-any.whl'
dockerdata+='\\n'
dockerdata+='COPY start_modelservice.sh start_modelservice.sh'
dockerdata+='\\n'
if textdata:
dockerdata+='''RUN apt-get update \\
&& apt-get install -y build-essential manpages-dev \\
&& python -m pip install --no-cache-dir --upgrade pip \\
&& python -m pip install --no-cache-dir pandas==1.2.4 \\
&& python -m pip install --no-cache-dir numpy==1.19.5 \\
&& python -m pip install --no-cache-dir joblib==1.0.1 \\
&& python -m pip install --no-cache-dir Cython==0.29.23 \\
&& mv site-packages/* /usr/local/lib/python3.8/site-packages \\
&& python -m pip install --no-cache-dir scipy==1.6.3 \\
&& python -m pip install --no-cache-dir AIX-0.1-py3-none-any.whl \\
&& python -m pip install --no-cache-dir Drift-0.1-py3-none-any.whl \\
&& python -m pip install --no-cache-dir scikit-learn==0.24.2 \\
&& python -m pip install --no-cache-dir spacy==2.2.3 \\
&& python -m pip install --no-cache-dir nltk==3.6.2 \\
&& python -m pip install --no-cache-dir textblob==0.15.3 \\
&& python -m pip install --no-cache-dir gensim==3.8.3 \\
&& python -m pip install --no-cache-dir demoji==1.1.0 \\
&& python -m pip install --no-cache-dir lxml==4.6.3 \\
&& python -m pip install --no-cache-dir Beautifulsoup4==4.9.3 \\
&& python -m pip install --no-cache-dir Unidecode==1.2.0 \\
&& python -m pip install --no-cache-dir pyspellchecker==0.6.2 \\
&& python -m pip install --no-cache-dir pycontractions==2.0.1 \\
&& python -m pip install --no-cache-dir tensorflow==2.4.1 \\
&& python -m pip install --no-cache-dir nltk==3.6.2 \\
&& python -m pip install --no-cache-dir -r requirements.txt \\
&& python install.py \\
&& chmod +x start_modelservice.sh
ENTRYPOINT ["./start_modelservice.sh"]
'''
else:
dockerdata+='''RUN apt-get update \\
&& apt-get install -y build-essential manpages-dev \\
&& python -m pip install --no-cache-dir --upgrade pip \\
&& python -m pip install --no-cache-dir pandas==1.2.4 \\
&& python -m pip install --no-cache-dir numpy==1.19.5 \\
&& python -m pip install --no-cache-dir joblib==1.0.1 \\
&& python -m pip install --no-cache-dir Cython==0.29.23 \\
&& python -m pip install --no-cache-dir scipy==1.6.3 \\
&& python -m pip install --no-cache-dir AIX-0.1-py3-none-any.whl \\
&& python -m pip install --no-cache-dir Drift-0.1-py3-none-any.whl \\
&& python -m pip install --no-cache-dir scikit-learn==0.24.2 \\
&& python -m pip install --no-cache-dir -r requirements.txt \\
&& chmod +x start_modelservice.sh
ENTRYPOINT ["./start_modelservice.sh"]
'''
f = open(dockerfile, "w")
f.write(str(dockerdata))
f.close()
requirementdata=''
requirementdata+='word2number==1.1'
if learner_type == 'DL':
requirementdata+='\\n'
requirementdata+='tensorflow==2.5.0'
f = open(requirementfilename, "w")
f.write(str(requirementdata))
f.close()
if textdata:
installfile='''
import nltk
import ssl
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
pass
else:
ssl._create_default_https_context = _create_unverified_https_context
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')'''
f = open(installfilename, "w")
f.write(str(installfile))
f.close()
try:
command = 'docker pull python:3.8-slim-buster'
os.system(command);
#subprocess.check_call(["chmod", "+x", "start_modelservice.sh"], cwd=model_dockerSetup)
subprocess.check_call(["docker", "build", "-t",modelname.lower()+":"+version,"."], cwd=model_dockerSetup)
subprocess.check_call(["docker", "save", "-o",modelname.lower()+"_"+version+".tar",modelname.lower()+":"+version], cwd=model_dockerSetup)
dockerfilepath = os.path.join(model_dockerSetup,modelname.lower()+"_"+version+".tar")
shutil.copyfile(dockerfilepath, os.path.join(filename,modelname.lower()+"_"+version+".tar"))
shutil.rmtree(model_dockerSetup)
return 'Success','SUCCESSFULLY'
except Exception as e:
print("Error: "+str(e))
shutil.rmtree(model_dockerSetup)
return 'Error',str(e)
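# Illustrative call sketch (not executed; the argument values below are assumptions):
# createDockerImage takes five arguments. 'learner_type' drives the generated requirements
# file ('DL' adds tensorflow), while 'textdata' controls whether site-packages and the
# pretrained text-processing assets are copied into the image.
#
#   createDockerImage(deploymentfolder, modelname, version, learner_type='ML', textdata=False)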
#createDockerImage(deploymentfolder,modelname,version)
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import subprocess
import os
import glob
import sys
import python_minifier
def encrypt_files(path):
cwd = os.getcwd()
secure_path = os.path.join(path,'SecuredScripts')
try:
if not os.path.exists(secure_path):
os.mkdir(secure_path)
files = [f for f in glob.glob(path + "/*.py")]
for file in files:
#encrypted_file_details[0] = file
#file = files[0]
#print(file)
#filename_w_dir = os.path.splitext(file)
filename_w_ext = os.path.basename(file)
filename, file_extension = os.path.splitext(filename_w_ext)
file_folder_path = os.path.join(secure_path,filename)
#print(file_folder_path)
if not os.path.exists(file_folder_path):
os.mkdir(file_folder_path)
# Minify python source code
minify_file = os.path.join(file_folder_path,filename+'_minify.py')
pythonfolder,_ = os.path.split(sys.executable)
pyminify_script = os.path.join(pythonfolder,'Scripts','pyminify.exe')
minify_command = "\\""+sys.executable+"\\" \\""+pyminify_script+ "\\" \\"" + file + "\\" > \\"" + minify_file+"\\""
subprocess.call(minify_command, shell=True)
# Change directory to folder path
os.chdir(file_folder_path)
# Obfuscate minified file
pyarmor_script = os.path.join(pythonfolder,'Scripts','pyarmor.exe')
obfusc_commmand = "\\""+sys.executable+"\\" \\""+pyarmor_script+"\\" obfuscate \\"" + minify_file+"\\""
#print(obfusc_commmand)
subprocess.call(obfusc_commmand, shell=True)
# Change directory to dist path
obfusc_file = os.path.join(file_folder_path,'dist',filename+'_minify.py')
#print(obfusc_file)
chdirpath = os.path.join(file_folder_path,'dist')
os.chdir(chdirpath)
# Compress obfuscated file
compressed_file = os.path.join(file_folder_path,'dist',filename+'_compressed.py')
#print(compressed_file)
pyminifier_script = os.path.join(pythonfolder,'Scripts','pyminifier.exe')
compress_command = "\\""+sys.executable+"\\" \\""+pyminifier_script+"\\" --gzip -o \\"" +compressed_file + "\\" \\"" + obfusc_file+"\\""
#print(compress_command)
subprocess.call(compress_command, shell=True)
#compile_command = sys.executable+'-m py_compile "' + compressed_file+'"'
#print(compile_command)
#subprocess.call(compile_command , shell=True)
#encrypted_file_details['compiled_file'] = file
#compiled_file = os.path.join(file_folder_path,'dist','__pycache__',filename+'_compressed.cpython-37.pyc')
#encrypted_file_details[1] = compiled_file
#encrypted_file_list.append(encrypted_file_details)
#encrypted_file = filename + '_compressed.cpython-37_encrypted.pyc'
#encrypt_command = "python " + cwd + "\\\\Encrypt_Key_Dcrypt.py " + compiled_file + ' ' + encrypted_file + " --g -e"
#print(encrypt_command)
#subprocess.call(encrypt_command, shell=True)
#encrypted_file_list += ']'
#return(encrypted_file_list)
os.chdir(path)
except OSError as err:
print ("Creation of the directory %s failed "+str(err))
# Driver function
if __name__=="__main__":
path = sys.argv[1]
encrypt_files(path)
#(base) C:\\Himanshu\\DataPreprocessing>pyminify DataPreprocessing.py > DataPreprocessing_minify.py
#Obfuscate
#(base) C:\\Himanshu\\DataPreprocessing>pyarmor obfuscate C:\\Himanshu\\DataPreprocessing\\DataPreprocessing_minify.py
#Compression
#(base) C:\\Himanshu\\DataPreprocessing>pyminifier --gzip -o C:\\Himanshu\\DataPreprocessing\\dist\\DataPreprocessing_compressed.py C:\\Himanshu\\DataPreprocessing\\dist\\DataPreprocessing_minify.py
#(base) C:\\Himanshu\\DataPreprocessing>cd dist
#(base) C:\\Himanshu\\DataPreprocessing\\dist>python DataPreprocessing_compressed.py "DocumentText" "Label" 90 ".csv" "C:\\Himanshu\\DataAcquisition\\ClassificationDataNewBalanced.csv"
#Compiling compressed .py to .pyc file
#(base) C:\\Himanshu\\DataPreprocessing\\dist>python -m py_compile DataPreprocessing_compressed.py
#Encrypt .pyc file
#(base) C:\\Himanshu\\DataPreprocessing\\dist>python C:\\Himanshu\\Encrypt_Key_Dcrypt.py C:\\Himanshu\\DataPreprocessing\\dist\\__pycache__\\DataPreprocessing_compressed.cpython-36.pyc DataPreprocessing_compressed.cpython-36_encrypted.pyc --g -e
#Decrypt file
#(base) C:\\Himanshu\\DataPreprocessing\\dist>python C:\\Himanshu\\Encrypt_Key_Dcrypt.py DataPreprocessing_compressed.cpython-36_encrypted.pyc DataPreprocessing_compressed.cpython-36_decrypted.pyc --d
#Run decrypted file
#(base) C:\\Himanshu\\DataPreprocessing\\dist>python DataPreprocessing_compressed.cpython-36_decrypted.pyc "DocumentText" "Label" 90 ".csv" "C:\\Himanshu\\DataAcquisition\\ClassificationDataNewBalanced.csv"
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import platform
import sys
import subprocess
import glob
import shutil
import time
from aion_deployment.EncryptPythonSourceCode import encrypt_files
import json
def encrypt(alldirs):
for dir in alldirs:
try:
encrypt_files(dir)
except Exception as error_obj:
print("Exception in encrypting", error_obj)
print("-"*50)
def replace_by_compressed(alldirs):
for dir in alldirs:
try:
#print("Processing dir", dir)
files = [f for f in glob.glob(dir + "/*.py")]
secure_path = os.path.join(dir, 'SecuredScripts')
time.sleep(6)
for file in files:
try:
filename_w_ext = os.path.basename(file)
filename, file_extension = os.path.splitext(filename_w_ext)
if filename == "__init__":
continue
#print("Processing file", file)
file_folder_path = os.path.join(secure_path, filename, 'dist')
compressed_file_path = os.path.join(file_folder_path, filename+'_compressed.py')
shutil.copy(compressed_file_path, dir)
os.remove(file)
new_compressed_file_path = os.path.join(dir, filename+'_compressed.py')
target_file_path = os.path.join(dir, filename_w_ext)
os.rename(new_compressed_file_path, target_file_path)
if filename == 'aion_prediction':
                        shutil.copytree(os.path.join(file_folder_path, 'pytransform'), os.path.join(dir, 'pytransform'))
except Exception as error_obj:
print("Exception in file ", error_obj)
shutil.rmtree(secure_path)
except Exception as error_obj:
print("Exception in dir ", error_obj)
def start_Obfuscate(path):
project_path = path
subdirs = [dI for dI in os.listdir(project_path) if os.path.isdir(os.path.join(project_path,dI))]
alldirs = [
project_path,
]
for subdir in subdirs:
if(subdir != 'pytransform'):
alldirs.append(os.path.join(project_path, subdir))
encrypt(alldirs)
replace_by_compressed(alldirs)
if __name__=="__main__":
project_path = sys.argv[1]
print("project_path", project_path)
subdirs = [dI for dI in os.listdir(project_path) if os.path.isdir(os.path.join(project_path,dI))]
alldirs = [
project_path,
]
for subdir in subdirs:
alldirs.append(os.path.join(project_path, subdir))
encrypt(alldirs)
print("*"*50)
replace_by_compressed(alldirs)
# python eion_compress.py "C:\\Users\\ashwani.s\\Desktop\\22April\\22April\\Mohita" "C:\\Users\\ashwani.s\\Desktop\\eion\\eion" > logfile.log
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pandas as pd
import numpy as np
import scipy
import warnings
import scipy.stats as st
import logging
import json
import sys
class inputdrift():
    def __init__(self,conf):
        self.log = logging.getLogger('eion')
        # assumption: the supplied config carries the selected columns as a comma separated string under 'features'
        self.features = conf.get('features', '')
def get_input_drift(self,ndf,hdf,outputfolder):
selectedColumns = self.features.split(',')
dataalertcount=0
distributionChangeColumns=""
distributionChangeMessage=[]
for i in range(0,len(selectedColumns)):
data1=hdf[selectedColumns[i]]
data2=ndf[selectedColumns[i]]
if(data1.dtype !="str" and data2.dtype !="str" ):
cumulativeData=data1.append(data2)
teststaticValue=teststatic(self,data1,data2)
if (teststaticValue < 0.05):
distributionName1,sse1=DistributionFinder(self,data1)
distributionName2,sse2=DistributionFinder(self,data2)
if(distributionName1 == distributionName2):
dataalertcount = dataalertcount
else:
dataalertcount = dataalertcount+1
distributionChangeColumns=distributionChangeColumns+selectedColumns[i]+","
changedColumn = {}
changedColumn['Feature'] = selectedColumns[i]
changedColumn['KS_Training'] = teststaticValue
changedColumn['Training_Distribution'] = distributionName1
changedColumn['New_Distribution'] = distributionName2
distributionChangeMessage.append(changedColumn)
else :
dataalertcount = dataalertcount
else :
response ="Selected Columns should be Numerical Values"
if(dataalertcount == 0):
resultStatus="Model is working as expected"
else :
resultStatus=json.dumps(distributionChangeMessage)
return(dataalertcount,resultStatus)
def DistributionFinder(self,data):
try:
distributionName =""
sse =0.0
KStestStatic=0.0
dataType=""
if(data.dtype == "float64"):
dataType ="Continuous"
elif(data.dtype =="int"):
dataType="Discrete"
elif(data.dtype =="int64"):
dataType="Discrete"
if(dataType == "Discrete"):
distributions= [st.bernoulli,st.binom,st.geom,st.nbinom,st.poisson]
index, counts = np.unique(data.astype(int),return_counts=True)
if(len(index)>=2):
best_sse = np.inf
y1=[]
total=sum(counts)
mean=float(sum(index*counts))/total
variance=float((sum(index**2*counts) -total*mean**2))/(total-1)
dispersion=mean/float(variance)
theta=1/float(dispersion)
                r=mean*(float(theta)/(1-theta))
for j in counts:
y1.append(float(j)/total)
pmf1=st.bernoulli.pmf(index,mean)
pmf2=st.binom.pmf(index,len(index),p=mean/len(index))
pmf3=st.geom.pmf(index,1/float(1+mean))
pmf4=st.nbinom.pmf(index,mean,r)
pmf5=st.poisson.pmf(index,mean)
sse1 = np.sum(np.power(y1 - pmf1, 2.0))
sse2 = np.sum(np.power(y1 - pmf2, 2.0))
sse3 = np.sum(np.power(y1 - pmf3, 2.0))
sse4 = np.sum(np.power(y1 - pmf4, 2.0))
sse5 = np.sum(np.power(y1- pmf5, 2.0))
sselist=[sse1,sse2,sse3,sse4,sse5]
for i in range(0,len(sselist)):
if best_sse > sselist[i] > 0:
best_distribution = distributions[i].name
best_sse = sselist[i]
elif (len(index) == 1):
best_distribution = "Constant Data-No Distribution"
best_sse = 0.0
distributionName =best_distribution
sse=best_sse
elif(dataType == "Continuous"):
distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta]
best_distribution = st.norm.name
best_sse = np.inf
datamin=data.min()
datamax=data.max()
nrange=datamax-datamin
y, x = np.histogram(data.astype(float), bins='auto', density=True)
x = (x + np.roll(x, -1))[:-1] / 2.0
for distribution in distributions:
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
params = distribution.fit(data.astype(float))
# Separate parts of parameters
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Calculate fitted PDF and error with fit in distribution
pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
sse = np.sum(np.power(y - pdf, 2.0))
if(best_sse >sse > 0):
best_distribution = distribution.name
best_sse = sse
distributionName =best_distribution
sse=best_sse
except:
response = str(sys.exc_info()[0])
message='Job has Failed'+response
print(message)
return distributionName,sse
##KStestStatic -pvalue finding
def teststatic(self,data1,data2):
try:
teststatic =st.ks_2samp(data1,data2)
pValue=0.0
scipyVersion =scipy.__version__
if(scipyVersion <= "0.14.1"):
pValue =teststatic[1]
else:
pValue =teststatic.pvalue
except:
response = str(sys.exc_info()[0])
print("Input Drift Job Failed "+response)
return pValue
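# Minimal usage sketch (illustrative only; the config key and file names are assumptions):
# the drift check runs a two-sample KS test per selected column and, when the p-value is
# below 0.05, compares the best-fitting distributions of the training and new data.
#
#   import pandas as pd
#   history_df = pd.read_csv('trainingdata.csv')        # hypothetical training data
#   new_df = pd.read_csv('productiondata.csv')          # hypothetical new data
#   detector = inputdrift({'features': 'age,salary'})
#   alertcount, result = detector.get_input_drift(new_df, history_df, 'drift_output')
#   print(alertcount, result)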
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from pathlib import Path
from AION.prediction_package.imports import importModule
from AION.prediction_package.aion_prediction import aionPrediction
from AION.prediction_package.utility import TAB_CHAR
from AION.prediction_package import utility
from AION.prediction_package.base import deployer
from AION.prediction_package import common
import numpy as np
def get_deployer( params):
if params['training']['algo'] == 'ARIMA':
return arima(params)
elif params['training']['algo'] == 'LSTM':
return lstm(params)
elif params['training']['algo'] == 'ENCODER_DECODER_LSTM_MVI_UVO':
return lstmencdec_mviuvo(params)
elif params['training']['algo'] == 'MLP':
return mlp(params)
elif params['training']['algo'] == 'VAR':
return var(params)
elif params['training']['algo'] == 'FBPROPHET':
return fbprophet(params)
else:
raise ValueError(f"Algorithm {params['training']['algo']} for time series forecasting is not supported")
def _profiler_code(params, importer):
"""
    This will create the profiler file based on the config file.
    A separate file is created because the profiler is also required for input drift detection.
"""
imported_modules = [
{'module': 'json', 'mod_from': None, 'mod_as': None},
{'module': 'scipy', 'mod_from': None, 'mod_as': None},
{'module': 'joblib', 'mod_from': None, 'mod_as': None},
{'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}
]
utility.import_modules(importer, imported_modules)
if 'code' in params['profiler'].get('preprocess',{}).keys():
code = params['profiler']['preprocess']['code']
else:
code = ""
code += """
class inputprofiler():
"""
init_code = """
def __init__(self):
"""
init_code += """
# preprocessing
preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl'
if not preprocess_path.exists():
raise ValueError(f'Preprocess model file not found: {preprocess_path}')
self.profiler = joblib.load(preprocess_path)
"""
run_code = """
def run(self,df):
df = df.replace(r'^\\s*$', np.NaN, regex=True)
"""
if 'code' in params['profiler'].get('preprocess',{}).keys():
run_code += """
df = preprocess( df)"""
if params['profiler'].get('unpreprocessed_columns'):
run_code += f"""
unpreprocessed_data = df['{params['profiler']['unpreprocessed_columns'][0]}']
df.drop(['{params['profiler']['unpreprocessed_columns'][0]}'], axis=1,inplace=True)
"""
if params['profiler'].get('force_numeric_conv'):
run_code += f"""
df[{params['profiler']['force_numeric_conv']}] = df[{params['profiler']['force_numeric_conv']}].apply(pd.to_numeric,errors='coerce')"""
run_code += _profiler_main_code(params)
if params['profiler'].get('unpreprocessed_columns'):
run_code += f"""
df['{params['profiler'].get('unpreprocessed_columns')[0]}'] = unpreprocessed_data
"""
run_code += """ return df
"""
utility.import_modules(importer, imported_modules)
import_code = importer.getCode()
return import_code + code + init_code + run_code
def _profiler_main_code(params):
code = f"""
df = self.profiler.transform(df)
columns = {params['profiler']['output_features']}
if isinstance(df, scipy.sparse.spmatrix):
df = pd.DataFrame(df.toarray(), columns=columns)
else:
df = pd.DataFrame(df, columns=columns)
"""
return code
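# Shape note (keys inferred from the lookups above; the concrete values are assumptions):
# _profiler_code() expects a params dict roughly like
#
#   {'profiler': {'preprocess': {'code': '...custom preprocess()...'},   # optional
#                 'unpreprocessed_columns': ['raw_text'],                # optional pass-through column
#                 'force_numeric_conv': ['col_a'],                       # optional numeric coercion list
#                 'output_features': ['col_a', 'col_b']}}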
class arima( deployer):
def __init__(self, params={}):
super().__init__( params)
self.name = 'timeseriesforecasting'
    def profiler_code( self):
imported_modules = [
{'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
]
importer = importModule()
utility.import_modules(importer, imported_modules)
code = """
class inputprofiler():
def __init__(self):
pass
def run( self,df):
df = df.replace(r'^\\s*$', np.NaN, regex=True)
return df[['noofforecasts']]
"""
return importer.getCode() + code
def feature_engg_code(self):
self.importer.addModule(module='pandas',mod_as='pd')
return f"""
class selector():
def __init__(self):
pass
def run(self, df):
return df
"""
def training_code( self):
self.importer.addModule(module='pandas',mod_as='pd')
self.importer.addModule(module='Path',mod_from='pathlib')
self.importer.addModule(module='numpy',mod_as='np')
self.importer.addModule(module='joblib')
return f"""
class trainer():
def __init__(self):
model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}"
if not model_file.exists():
raise ValueError(f'Trained model file not found: {{model_file}}')
self.model = joblib.load(model_file)
def run(self,df):
return self.model.predict(n_periods=int(df["noofforecasts"][0]))
"""
def formatter_code(self):
self.importer.addModule('json')
self.importer.addModule('pandas', mod_as='pd')
return """
class output_format():
def __init__( self):
pass
def run(self,raw_df,df):
df = df.round(2)
df = json.dumps(df.tolist())
outputjson = {"status":"SUCCESS","data":eval(df)}
return(json.dumps(outputjson))
"""
class lstm( deployer):
def __init__(self, params={}):
super().__init__( params)
self.name = 'timeseriesforecasting'
def profiler_code(self):
importer = importModule()
return _profiler_code( self.params, importer)
def training_code( self):
self.importer.addModule(module='pandas',mod_as='pd')
self.importer.addModule(module='Path',mod_from='pathlib')
code = f"""
class trainer():
"""
init_code, run_code = self._get_train_code()
return code + init_code + run_code
def _get_train_code(self):
self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models')
init_code = f"""
def __init__( self):
model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}"
if not model_file.exists():
raise ValueError(f'Trained model file not found: {{model_file}}')
self.model = load_model(model_file)
"""
run_code = f"""
def run(self, df):
lag_order={self.params['training']['lag_order']}
xt = df.values
scaler_file = (Path(__file__).parent/"model")/"{self.params['training']['scaler_file']}"
if not scaler_file.exists():
raise ValueError(f'Scaling file not found: {{scaler_file}}')
loaded_scaler_model = joblib.load(scaler_file)
xt = xt.astype('float32')
xt = loaded_scaler_model.transform(xt)
noOfPredictions = 10
pred_data = xt
y_future = []
for i in range(noOfPredictions):
"""
if len(self.params['selector']['output_features']) == 1:
run_code += f"""
pred_data = pred_data[-lag_order:]
pred_data = pred_data.reshape((1,lag_order,1))
pred = self.model.predict(pred_data)
predoutput = loaded_scaler_model.inverse_transform(pred)
y_future.append(predoutput.flatten()[-1])
pred_data = np.append(pred_data,pred)
pred = pd.DataFrame(index=range(0,len(y_future)),columns={self.params['selector']['output_features']})
for i in range(0, len(y_future)):
pred.iloc[i] = y_future[i]
return pred
"""
else:
run_code += f"""
pdata = pred_data[-lag_order:]
pdata = pdata.reshape((1,lag_order,{len(self.params['selector']['output_features'])}))
pred = self.model.predict(pdata)
predoutput = loaded_scaler_model.inverse_transform(pred)
y_future.append(predoutput)
pred_data = np.append(pred_data,pred,axis=0)
pred = pd.DataFrame(index=range(0,len(y_future)),columns={self.params['selector']['output_features']})
for i in range(0, len(y_future)):
pred.iloc[i] = y_future[i]
return pred
"""
return init_code, run_code
def formatter_code(self):
self.importer.addModule('json')
self.importer.addModule('pandas', mod_as='pd')
return """
class output_format():
def __init__( self):
pass
def run(self,raw_df,df):
df = df.round(2)
df = df.to_json(orient='records')
outputjson = {"status":"SUCCESS","data":json.loads(df)}
return(json.dumps(outputjson))
"""
class lstmencdec_mviuvo( deployer):
def __init__(self, params={}):
super().__init__( params)
self.name = 'timeseriesforecasting'
        outputFeatures = params['profiler']['output_features']
        self.targetColIndx = outputFeatures.index(params['features']['target_feat'])
selectedColDict = params['selector']['output_features']
self.selectedCols = list()
for col in selectedColDict:
self.selectedCols.append(col)
def profiler_code(self):
importer = importModule()
return _profiler_code( self.params, importer)
def training_code( self):
self.importer.addModule(module='pandas',mod_as='pd')
self.importer.addModule(module='Path',mod_from='pathlib')
code = f"""
class trainer():
"""
init_code, run_code = self._get_train_code()
return code + init_code + run_code
def _get_train_code(self):
self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models')
init_code = f"""
def __init__( self):
model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}"
if not model_file.exists():
raise ValueError(f'Trained model file not found: {{model_file}}')
self.model = load_model(model_file)
"""
run_code = f"""
def run(self, df):
targetColIndx = {self.targetColIndx}
lag_order={self.params['training']['lag_order']}
xt = df.values
scaler_file = (Path(__file__).parent/"model")/"{self.params['training']['scaler_file']}"
if not scaler_file.exists():
raise ValueError(f'Scaling file not found: {{scaler_file}}')
loaded_scaler_model = joblib.load(scaler_file)
xt = xt.astype('float32')
xt = loaded_scaler_model.transform(xt)
noOfPredictions = 10
pred_data = xt
y_future = []
pdata = pred_data[-lag_order:]
pdata = pdata.reshape((1,lag_order,{len(self.params['selector']['output_features'])}))
pred = self.model.predict(pdata)
pred_1d = pred.ravel()
pdata_2d = pdata.ravel().reshape(len(pdata) * lag_order, {len(self.params['selector']['output_features'])})
pdata_2d[:,targetColIndx] = pred_1d
pred_2d_inv = loaded_scaler_model.inverse_transform(pdata_2d)
predout = pred_2d_inv[:, targetColIndx]
predout = predout.reshape(len(pred_1d),1)
pred = pd.DataFrame(index=range(0,len(predout)),columns=['{self.params['features']['target_feat']}'])
for i in range(0, len(predout)):
pred.iloc[i] = predout[i]
return pred
"""
return init_code, run_code
def feature_engg_code(self):
self.importer.addModule(module='pandas',mod_as='pd')
return f"""
class selector():
def __init__(self):
pass
def run(self, df):
return df[{self.selectedCols}]
"""
def formatter_code(self):
self.importer.addModule('json')
self.importer.addModule('pandas', mod_as='pd')
return """
class output_format():
def __init__( self):
pass
def run(self,raw_df,df):
df = df.round(2)
df = df.to_json(orient='records')
outputjson = {"status":"SUCCESS","data":json.loads(df)}
return(json.dumps(outputjson))
"""
class mlp( lstm):
def __init__(self, params={}):
super().__init__( params)
self.name = 'timeseriesforecasting'
def training_code( self):
self.importer.addModule(module='pandas',mod_as='pd')
self.importer.addModule(module='Path',mod_from='pathlib')
code = f"""
class trainer():
"""
init_code, run_code = self._get_train_code()
return code + init_code + run_code
def _get_train_code(self):
self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models')
init_code = f"""
def __init__( self):
model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}"
if not model_file.exists():
raise ValueError(f'Trained model file not found: {{model_file}}')
self.model = load_model(model_file)"""
run_code = f"""
def run(self, df):
lag_order={self.params['training']['lag_order']}
xt = df.values
scaler_file = (Path(__file__).parent/"model")/"{self.params['training']['scaler_file']}"
if not scaler_file.exists():
raise ValueError(f'Scaling file not found: {{scaler_file}}')
loaded_scaler_model = joblib.load(scaler_file)
xt = xt.astype('float32')
xt = loaded_scaler_model.transform(xt)
noOfPredictions = 10
pred_data = xt
y_future = []
for i in range(noOfPredictions):
"""
if len(self.params['selector']['output_features']) == 1:
run_code += f"""
pred_data = pred_data[-lag_order:]
pred_data = pred_data.reshape((1,lag_order))
pred = self.model.predict(pred_data)
predoutput = loaded_scaler_model.inverse_transform(pred)
y_future.append(predoutput.flatten()[-1])
pred_data = np.append(pred_data,pred)
pred = pd.DataFrame(index=range(0,len(y_future)),columns={self.params['selector']['output_features']})
for i in range(0, len(y_future)):
pred.iloc[i] = y_future[i]
return pred
"""
else:
run_code += f"""
pdata = pred_data[-lag_order:]
pdata = pdata.reshape((1,lag_order,{len(self.params['selector']['output_features'])}))
pred = self.model.predict(pdata)
predoutput = loaded_scaler_model.inverse_transform(pred)
y_future.append(predoutput)
pred_data = np.append(pred_data,pred,axis=0)
pred = pd.DataFrame(index=range(0,len(y_future)),columns={self.params['selector']['output_features']})
for i in range(0, len(y_future)):
pred.iloc[i] = y_future[i]
return pred
"""
return init_code, run_code
class var( deployer):
def __init__(self, params={}):
super().__init__( params)
self.name = 'timeseriesforecasting'
def profiler_code(self):
importer = importModule()
code = _profiler_code( self.params, importer)
return code
def feature_engg_code(self):
self.importer.addModule(module='pandas',mod_as='pd')
return f"""
class selector():
def __init__(self):
pass
def run(self, df):
return df[{self.params['selector']['output_features']}]
"""
def training_code( self):
self.importer.addModule(module='joblib')
return f"""
class trainer():
def __init__( self):
model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}"
if not model_file.exists():
raise ValueError(f'Trained model file not found: {{model_file}}')
self.model = joblib.load(model_file)
def run(self,df):
lag_order = self.model.k_ar
return self.model.forecast(df.values[-lag_order:],steps={self.params['training']['no_of_prediction']})
"""
def formatter_code(self):
self.importer.addModule('json')
self.importer.addModule('pandas', mod_as='pd')
return f"""
class output_format():
def __init__( self):
pass
def invertTransformation(self,predictions):
        datasetdf = pd.read_csv((Path(__file__).parent/"data")/"trainingdata.csv")
dictDiffCount = {self.params['training']['dictDiffCount']}
target_features = "{self.params['features']['target_feat']}"
columns = target_features.split(',')
pred = pd.DataFrame(index=range(0,len(predictions)),columns=columns)
for j in range(0,len(columns)):
for i in range(0, len(predictions)):
pred.iloc[i][j] = round(predictions[i][j],2)
prediction = pred
for col in columns:
if col in dictDiffCount:
if dictDiffCount[col]==2:
prediction[col] = (datasetdf[col].iloc[-1]-datasetdf[col].iloc[-2]) + prediction[col].cumsum()
prediction[col] = datasetdf[col].iloc[-1] + prediction[col].cumsum()
prediction = pred
return(prediction)
def run(self,raw_df,df):
df = self.invertTransformation(df)
df = df.to_json(orient='records',double_precision=2)
outputjson = {{"status":"SUCCESS","data":json.loads(df)}}
return(json.dumps(outputjson))
"""
class fbprophet( deployer):
def __init__(self, params={}):
super().__init__( params)
self.name = 'timeseriesforecasting'
def profiler_code( self):
imported_modules = [
{'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
]
importer = importModule()
utility.import_modules(importer, imported_modules)
code = """
class inputprofiler():
def __init__(self):
pass
def run( self,df):
df = df.replace(r'^\\s*$', np.NaN, regex=True)
return df[['noofforecasts']]
"""
return importer.getCode() + code
def feature_engg_code(self):
self.importer.addModule(module='pandas',mod_as='pd')
return f"""
class selector():
def __init__(self):
pass
def run(self, df):
return df
"""
def training_code( self):
self.importer.addModule(module='pandas',mod_as='pd')
self.importer.addModule(module='Path',mod_from='pathlib')
self.importer.addModule(module='joblib')
code = f"""
class trainer():
def __init__(self):
model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}"
if not model_file.exists():
raise ValueError(f'Trained model file not found: {{model_file}}')
self.model = joblib.load(model_file)
"""
code += f"""
def run(self,df):
sessonal_freq = '{self.params['training']['sessonal_freq']}'
ts_prophet_future = self.model.make_future_dataframe(periods=int(df["noofforecasts"][0]),freq=sessonal_freq,include_history = False)
"""
if (self.params['training']['additional_regressors']):
code += f"""
additional_regressors={self.params['training']['additional_regressors']}
        ts_prophet_future[additional_regressors] = df[additional_regressors]
ts_prophet_future.reset_index(drop=True)
ts_prophet_future=ts_prophet_future.dropna()
"""
code += """
train_forecast = self.model.predict(ts_prophet_future)
prophet_forecast_tail=train_forecast[[\\'ds\\', \\'yhat\\', \\'yhat_lower\\',\\'yhat_upper\\']].tail( int(df["noofforecasts"][0]))
return(prophet_forecast_tail)"""
return code
def formatter_code(self):
self.importer.addModule('json')
self.importer.addModule('pandas', mod_as='pd')
return """
class output_format():
def __init__( self):
pass
def run(self,raw_df,df):
df = df.to_json(orient='records')
outputjson = {"status":"SUCCESS","data":json.loads(df)}
return(json.dumps(outputjson))
"""
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import warnings
warnings.filterwarnings("ignore")
import json
import os
import sys
import pandas as pd
from pandas import json_normalize
#from selector import selector
#from inputprofiler import inputprofiler
#from trained_model import trained_model
#from output_format import output_format
from autogluon.tabular import TabularDataset, TabularPredictor
from autogluon.core.utils.utils import setup_outputdir
from autogluon.core.utils.loaders import load_pkl
from autogluon.core.utils.savers import save_pkl
import os.path
class MultilabelPredictor():
""" Tabular Predictor for predicting multiple columns in table.
Creates multiple TabularPredictor objects which you can also use individually.
You can access the TabularPredictor for a particular label via: `multilabel_predictor.get_predictor(label_i)`
Parameters
----------
labels : List[str]
The ith element of this list is the column (i.e. `label`) predicted by the ith TabularPredictor stored in this object.
path : str
Path to directory where models and intermediate outputs should be saved.
If unspecified, a time-stamped folder called "AutogluonModels/ag-[TIMESTAMP]" will be created in the working directory to store all models.
Note: To call `fit()` twice and save all results of each fit, you must specify different `path` locations or don't specify `path` at all.
Otherwise files from first `fit()` will be overwritten by second `fit()`.
Caution: when predicting many labels, this directory may grow large as it needs to store many TabularPredictors.
problem_types : List[str]
The ith element is the `problem_type` for the ith TabularPredictor stored in this object.
eval_metrics : List[str]
The ith element is the `eval_metric` for the ith TabularPredictor stored in this object.
consider_labels_correlation : bool
Whether the predictions of multiple labels should account for label correlations or predict each label independently of the others.
If True, the ordering of `labels` may affect resulting accuracy as each label is predicted conditional on the previous labels appearing earlier in this list (i.e. in an auto-regressive fashion).
Set to False if during inference you may want to individually use just the ith TabularPredictor without predicting all the other labels.
kwargs :
Arguments passed into the initialization of each TabularPredictor.
"""
multi_predictor_file = 'multilabel_predictor.pkl'
def __init__(self, labels, path, problem_types=None, eval_metrics=None, consider_labels_correlation=True, **kwargs):
if len(labels) < 2:
raise ValueError("MultilabelPredictor is only intended for predicting MULTIPLE labels (columns), use TabularPredictor for predicting one label (column).")
self.path = setup_outputdir(path, warn_if_exist=False)
self.labels = labels
self.consider_labels_correlation = consider_labels_correlation
self.predictors = {} # key = label, value = TabularPredictor or str path to the TabularPredictor for this label
if eval_metrics is None:
self.eval_metrics = {}
else:
self.eval_metrics = {labels[i] : eval_metrics[i] for i in range(len(labels))}
problem_type = None
eval_metric = None
for i in range(len(labels)):
label = labels[i]
path_i = self.path + "Predictor_" + label
if problem_types is not None:
problem_type = problem_types[i]
if eval_metrics is not None:
                eval_metric = self.eval_metrics[label]
self.predictors[label] = TabularPredictor(label=label, problem_type=problem_type, eval_metric=eval_metric, path=path_i, **kwargs)
def fit(self, train_data, tuning_data=None, **kwargs):
""" Fits a separate TabularPredictor to predict each of the labels.
Parameters
----------
train_data, tuning_data : str or autogluon.tabular.TabularDataset or pd.DataFrame
See documentation for `TabularPredictor.fit()`.
kwargs :
Arguments passed into the `fit()` call for each TabularPredictor.
"""
if isinstance(train_data, str):
train_data = TabularDataset(train_data)
if tuning_data is not None and isinstance(tuning_data, str):
tuning_data = TabularDataset(tuning_data)
train_data_og = train_data.copy()
if tuning_data is not None:
tuning_data_og = tuning_data.copy()
save_metrics = len(self.eval_metrics) == 0
for i in range(len(self.labels)):
label = self.labels[i]
predictor = self.get_predictor(label)
if not self.consider_labels_correlation:
labels_to_drop = [l for l in self.labels if l!=label]
else:
                labels_to_drop = [self.labels[j] for j in range(i+1,len(self.labels))]
train_data = train_data_og.drop(labels_to_drop, axis=1)
if tuning_data is not None:
tuning_data = tuning_data_og.drop(labels_to_drop, axis=1)
print(f"Fitting TabularPredictor for label: {label} ...")
predictor.fit(train_data=train_data, tuning_data=tuning_data, **kwargs)
self.predictors[label] = predictor.path
if save_metrics:
self.eval_metrics[label] = predictor.eval_metric
self.save()
def predict(self, data, **kwargs):
""" Returns DataFrame with label columns containing predictions for each label.
Parameters
----------
data : str or autogluon.tabular.TabularDataset or pd.DataFrame
Data to make predictions for. If label columns are present in this data, they will be ignored. See documentation for `TabularPredictor.predict()`.
kwargs :
Arguments passed into the predict() call for each TabularPredictor.
"""
return self._predict(data, as_proba=False, **kwargs)
def predict_proba(self, data, **kwargs):
""" Returns dict where each key is a label and the corresponding value is the `predict_proba()` output for just that label.
Parameters
----------
data : str or autogluon.tabular.TabularDataset or pd.DataFrame
Data to make predictions for. See documentation for `TabularPredictor.predict()` and `TabularPredictor.predict_proba()`.
kwargs :
Arguments passed into the `predict_proba()` call for each TabularPredictor (also passed into a `predict()` call).
"""
return self._predict(data, as_proba=True, **kwargs)
def evaluate(self, data, **kwargs):
""" Returns dict where each key is a label and the corresponding value is the `evaluate()` output for just that label.
Parameters
----------
data : str or autogluon.tabular.TabularDataset or pd.DataFrame
            Data to evaluate predictions of all labels for, must contain all labels as columns. See documentation for `TabularPredictor.evaluate()`.
kwargs :
Arguments passed into the `evaluate()` call for each TabularPredictor (also passed into the `predict()` call).
"""
data = self._get_data(data)
eval_dict = {}
for label in self.labels:
print(f"Evaluating TabularPredictor for label: {label} ...")
predictor = self.get_predictor(label)
eval_dict[label] = predictor.evaluate(data, **kwargs)
if self.consider_labels_correlation:
data[label] = predictor.predict(data, **kwargs)
return eval_dict
def save(self):
""" Save MultilabelPredictor to disk. """
for label in self.labels:
if not isinstance(self.predictors[label], str):
self.predictors[label] = self.predictors[label].path
save_pkl.save(path=self.path+self.multi_predictor_file, object=self)
print(f"MultilabelPredictor saved to disk. Load with: MultilabelPredictor.load('{self.path}')")
@classmethod
def load(cls, path):
""" Load MultilabelPredictor from disk `path` previously specified when creating this MultilabelPredictor. """
path = os.path.expanduser(path)
if path[-1] != os.path.sep:
path = path + os.path.sep
return load_pkl.load(path=path+cls.multi_predictor_file)
def get_predictor(self, label):
""" Returns TabularPredictor which is used to predict this label. """
predictor = self.predictors[label]
if isinstance(predictor, str):
return TabularPredictor.load(path=predictor)
return predictor
def _get_data(self, data):
if isinstance(data, str):
return TabularDataset(data)
return data.copy()
def _predict(self, data, as_proba=False, **kwargs):
data = self._get_data(data)
if as_proba:
predproba_dict = {}
for label in self.labels:
print(f"Predicting with TabularPredictor for label: {label} ...")
predictor = self.get_predictor(label)
if as_proba:
predproba_dict[label] = predictor.predict_proba(data, as_multiclass=True, **kwargs)
            data[label] = predictor.predict(data, **kwargs)
if not as_proba:
return data[self.labels]
else:
return predproba_dict
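# Hedged usage sketch (file names, label names and time_limit below are assumed
# placeholders, not values used by this script): how the MultilabelPredictor above
# is typically trained and queried.
#
#   train_data = TabularDataset('train.csv')
#   predictor = MultilabelPredictor(labels=['label_a', 'label_b'],
#                                   path='AutogluonModels/example',
#                                   problem_types=['binary', 'regression'])
#   predictor.fit(train_data, time_limit=60)                      # one TabularPredictor per label
#   predictions = predictor.predict(TabularDataset('test.csv'))   # one column per label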
def predict(data):
try:
if os.path.splitext(data)[1] == ".tsv":
df=pd.read_csv(data,encoding='utf-8',sep='\\t')
elif os.path.splitext(data)[1] == ".csv":
df=pd.read_csv(data,encoding='utf-8')
else:
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
df = json_normalize(jsonData)
#df0 = df.copy()
#profilerobj = inputprofiler()
#df = profilerobj.apply_profiler(df)
#selectobj = selector()
#df = selectobj.apply_selector(df)
#modelobj = trained_model()
#output = modelobj.predict(df,"")
# Load the Test data for Prediction
# ----------------------------------------------------------------------------#
test_data = df#TabularDataset(data) #'testingDataset.csv'
#subsample_size = 2
# ----------------------------------------------------------------------------#
# Specify the corresponding target features to be used
# ----------------------------------------------------------------------------#
#labels = ['education-num','education','class']
configFile = os.path.join(os.path.dirname(os.path.abspath(__file__)),'etc','predictionConfig.json')
with open(configFile, 'rb') as cfile:
data = json.load(cfile)
labels = data['targetFeature']
# ----------------------------------------------------------------------------#
for x in labels:
if x in list(test_data.columns):
test_data.drop(x,axis='columns', inplace=True)
# ----------------------------------------------------------------------------#
#test_data = test_data.sample(n=subsample_size, random_state=0)
#print(test_data)
#test_data_nolab = test_data.drop(columns=labels)
#test_data_nolab.head()
test_data_nolab = test_data
# ----------------------------------------------------------------------------#
# Load the trained model from where it's stored
# ----------------------------------------------------------------------------#
model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'ModelPath')
multi_predictor = MultilabelPredictor.load(model_path)
# ----------------------------------------------------------------------------#
# Start the prediction and perform the evaluation
# ----------------------------------------------------------------------------#
predictions = multi_predictor.predict(test_data_nolab)
for label in labels:
df[label+'_predict'] = predictions[label]
#evaluations = multi_predictor.evaluate(test_data)
#print(evaluations)
#print("Evaluated using metrics:", multi_predictor.eval_metrics)
# ----------------------------------------------------------------------------#
# ----------------------------------------------------------------------------#
#outputobj = output_format()
#output = outputobj.apply_output_format(df0,output)
outputjson = df.to_json(orient='records')
outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}
output = json.dumps(outputjson)
print("predictions:",output)
return(output)
# ----------------------------------------------------------------------------#
except KeyError as e:
output = {"status":"FAIL","message":str(e).strip('"')}
print("predictions:",json.dumps(output))
return (json.dumps(output))
except Exception as e:
output = {"status":"FAIL","message":str(e).strip('"')}
print("predictions:",json.dumps(output))
return (json.dumps(output))
if __name__ == "__main__":
output = predict(sys.argv[1])
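# Hedged CLI sketch (the script name below is assumed): this generated file is invoked
# with a single argument pointing at the data to score, for example
#   python aion_predict.py newdata.csv
# and prints 'predictions:' followed by the JSON payload built above.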
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import warnings
warnings.filterwarnings("ignore")
import json
import os
import sys
import pandas as pd
import numpy as np
from pandas import json_normalize
from autogluon.text import TextPredictor
import os.path
def predict(data):
try:
if os.path.splitext(data)[1] == ".tsv":
df=pd.read_csv(data,encoding='utf-8',sep='\\t')
elif os.path.splitext(data)[1] == ".csv":
df=pd.read_csv(data,encoding='utf-8')
else:
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
df = json_normalize(jsonData)
model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'text_prediction')
predictor = TextPredictor.load(model_path)
predictions = predictor.predict(df)
df['predict'] = predictions
outputjson = df.to_json(orient='records')
outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}
output = json.dumps(outputjson)
print("predictions:",output)
return(output)
except KeyError as e:
output = {"status":"FAIL","message":str(e).strip('"')}
print("predictions:",json.dumps(output))
return (json.dumps(output))
except Exception as e:
output = {"status":"FAIL","message":str(e).strip('"')}
print("predictions:",json.dumps(output))
return (json.dumps(output))
if __name__ == "__main__":
output = predict(sys.argv[1])
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
# -*- coding: utf-8 -*-
import logging
logging.getLogger('tensorflow').disabled = True
import json
import mlflow
import mlflow.sklearn
import mlflow.sagemaker as mfs
# from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
# from sklearn import datasets
import time
import numpy as np
# Load dataset
# from sklearn.datasets import load_iris
import pickle
# Load the pickled model
# from matplotlib import pyplot
import sys
import os
import boto3
import subprocess
import os.path
from os.path import expanduser
import platform
from pathlib import Path
class aionMlopsService:
def __init__(self,model,mlflowtosagemakerDeploy,mlflowtosagemakerPushOnly,mlflowtosagemakerPushImageName,mlflowtosagemakerdeployModeluri,experiment_name,mlflow_modelname,awsaccesskey_id,awssecretaccess_key,aws_session_token,mlflow_container_name,aws_region,aws_id,iam_sagemakerfullaccess_arn,sm_app_name,sm_deploy_option,delete_ecr_repository,ecrRepositoryName):
try:
self.model=model
self.mlflowtosagemakerDeploy=mlflowtosagemakerDeploy
self.mlflowtosagemakerPushOnly=str(mlflowtosagemakerPushOnly)
self.mlflowtosagemakerPushImageName=str(mlflowtosagemakerPushImageName)
self.mlflowtosagemakerdeployModeluri=str(mlflowtosagemakerdeployModeluri)
self.experiment_name=experiment_name
self.mlflow_modelname=mlflow_modelname
self.awsaccesskey_id=awsaccesskey_id
self.awssecretaccess_key=awssecretaccess_key
self.aws_session_token=aws_session_token
self.mlflow_container_name=mlflow_container_name
self.aws_region=aws_region
self.aws_id=aws_id
self.iam_sagemakerfullaccess_arn=iam_sagemakerfullaccess_arn
self.sm_app_name=sm_app_name
self.sm_deploy_option=sm_deploy_option
self.delete_ecr_repository=delete_ecr_repository
self.ecrRepositoryName=ecrRepositoryName
from appbe.dataPath import LOG_LOCATION
sagemakerLogLocation = LOG_LOCATION
try:
os.makedirs(sagemakerLogLocation)
except OSError as e:
if (os.path.exists(sagemakerLogLocation)):
pass
else:
raise OSError('sagemakerLogLocation error.')
self.sagemakerLogLocation=str(sagemakerLogLocation)
filename_mlops = 'mlopslog_'+str(int(time.time()))
filename_mlops=filename_mlops+'.log'
# filename = 'mlopsLog_'+Time()
filepath = os.path.join(self.sagemakerLogLocation, filename_mlops)
logging.basicConfig(filename=filepath, format='%(message)s',filemode='w')
# logging.basicConfig(filename="uq_logging.log", format='%(asctime)s %(message)s',filemode='w')
# logging.basicConfig(filename="uq_logging.log", format=' %(message)s',filemode='w')
# logging.basicConfig(filename='uq_logging.log', encoding='utf-8', level=logging.INFO)
self.log = logging.getLogger('aionMLOps')
self.log.setLevel(logging.DEBUG)
# mlflow.set_experiment(self.experiment_name)
except Exception as e:
self.log.info('<!------------- mlflow model INIT Error ---------------> '+str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def mlflowSetPath(self,path):
track_dir=os.path.join(path,'mlruns')
uri="file:"+str(Path(track_dir))
return uri
#Currently not used this delete ecr repository option
def ecr_repository_delete(self,rep_name):
# import subprocess
client = boto3.client('ecr')
repositories = client.describe_repositories()
ecr_delete_rep=client.delete_repository(registryId=self.aws_id,repositoryName=self.ecrRepositoryName,force=True)
mlflow_ecr_delete=subprocess.run(['aws', 'ecr', 'delete-repository','--repository-name',rep_name,'||','true'])
self.log.info('Success: deleted aws ecr repository which contains mlops image.')
def check_sm_deploy_status(self,app_name):
sage_client = boto3.client('sagemaker', region_name=self.aws_region)
endpoint_description = sage_client.describe_endpoint(EndpointName=app_name)
endpoint_status = endpoint_description["EndpointStatus"]
try:
failure_reason=endpoint_description["FailureReason"]
self.log.info("sagemaker end point creation failure reason is: "+str(failure_reason))
except:
pass
endpoint_status=str(endpoint_status)
return endpoint_status
def invoke_sm_endpoint(self,app_name, input_json):
client = boto3.session.Session().client("sagemaker-runtime", self.aws_region)
response = client.invoke_endpoint(
EndpointName=app_name,
Body=input_json,
ContentType='application/json; format=pandas-split',
)
# preds = response['Body'].read().decode("ascii")
preds = response['Body'].read().decode("ascii")
preds = json.loads(preds)
# print("preds: {}".format(preds))
return preds
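    # Hedged sketch (hypothetical feature names): the sagemaker endpoint expects the body
    # in pandas 'split' orientation, as produced by pd.DataFrame(rows).to_json(orient='split'):
    #   {"columns": ["f1", "f2"], "index": [0], "data": [[1.2, 3.4]]}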
def predict_sm_app_endpoint(self,X_test):
#print(X_test)
import pandas as pd
prediction=None
AWS_ACCESS_KEY_ID=str(self.awsaccesskey_id)
AWS_SECRET_ACCESS_KEY=str(self.awssecretaccess_key)
AWS_SESSION_TOKEN=str(self.aws_session_token)
region = str(self.aws_region)
#Existing model deploy options
# mlflowtosagemakerPushImageName=str(self.mlflowtosagemakerPushImageName)
# mlflowtosagemakerdeployModeluri=str(self.mlflowtosagemakerdeployModeluri)
try:
import subprocess
cmd = 'aws configure set region_name '+region
os.system(cmd)
cmd = 'aws configure set aws_access_key_id '+AWS_ACCESS_KEY_ID
os.system(cmd)
cmd = 'aws configure set aws_secret_access_key '+AWS_SECRET_ACCESS_KEY
os.system(cmd)
'''
aws_region=subprocess.run(['aws', 'configure', 'set','region_name',region])
aws_accesskeyid=subprocess.run(['aws', 'configure', 'set','aws_access_key_id',AWS_ACCESS_KEY_ID])
aws_secretaccesskey=subprocess.run(['aws', 'configure', 'set','aws_secret_access_key',AWS_SECRET_ACCESS_KEY])
'''
except:
pass
        #Create a session for aws communication using aws boto3 lib
# s3_client = boto3.client('ecr',aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region)
# s3 = boto3.resource('ecr', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key= AWS_SECRET_ACCESS_KEY)
session = boto3.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region)
#X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=2)
# query_input = pd.DataFrame(X_test).iloc[[1,5]].to_json(orient="split")
try:
query_input = pd.DataFrame(X_test).to_json(orient="split")
#print(query_input)
prediction = self.invoke_sm_endpoint(app_name=self.sm_app_name, input_json=query_input)
# self.log.info("sagemaker end point Prediction: \\n"+str(prediction))
except Exception as e:
print(e)
return prediction
def deleteSagemakerApp(self,app_name,region):
# import mlflow.sagemaker as mfs
# region = 'ap-south-1'
# app_name = 'aion-demo-app'
mfs.delete(app_name=app_name,region_name=region, archive=False,synchronous=True, timeout_seconds=300)
# print("AION mlops sagemaker application endpoint is deleted....\\n")
self.log.info('AION mlops sagemaker application endpoint is deleted, application name is: '+str(app_name))
def deployModel2sagemaker(self,mlflow_container_name,tag_id,model_path):
region = str(self.aws_region)
aws_id = str(self.aws_id)
iam_sagemakerfullaccess_arn = str(self.iam_sagemakerfullaccess_arn)
app_name = str(self.sm_app_name)
model_uri = str(model_path)
app_status=False
mlflow_root_dir = None
try:
os.chdir(str(self.sagemakerLogLocation))
mlflow_root_dir = os.getcwd()
self.log.info('mlflow root dir: '+str(mlflow_root_dir))
except:
self.log.info("path issue.")
try:
c_status=self.check_sm_deploy_status(app_name)
#if ((c_status == "Failed") or (c_status == "OutOfService")):
if ((c_status == "Failed") or (c_status.lower() == "failed")):
app_status=False
self.log.info("Sagemaker endpoint status: Failed.\\n")
mfs.delete(app_name=app_name,region_name=region, archive=False,synchronous=True, timeout_seconds=300)
elif ((c_status.lower() == "inservice") or (c_status == "InService")):
app_status=True
self.log.info("Sagemaker endpoint status: InService. Running sagemaker endpoint name: \\n"+str(app_name))
else:
app_status=False
pass
except:
# print("deploy status error.\\n")
pass
#aws ecr model app_name should contain only [[a-zA-Z0-9-]]
import re
if app_name:
pattern = re.compile("[A-Za-z0-9-]+")
# if found match (entire string matches pattern)
if pattern.fullmatch(app_name) is not None:
#print("Found match: ")
pass
else:
app_name = 'aion-demo-app'
else:
app_name = 'aion-demo-app'
mlflow_image=mlflow_container_name+':'+tag_id
image_url = aws_id + '.dkr.ecr.' + region + '.amazonaws.com/' + mlflow_image
deploy_option="create"
self.log.info('deploy_option: \\n'+str(deploy_option))
if (deploy_option.lower() == "create"):
# Other deploy modes: mlflow.sagemaker.DEPLOYMENT_MODE_ADD,mlflow.sagemaker.DEPLOYMENT_MODE_REPLACE
if not (app_status):
try:
mfs.deploy(app_name=app_name,model_uri=model_uri,region_name=region,mode="create",execution_role_arn=iam_sagemakerfullaccess_arn,image_url=image_url)
self.log.info('sagemaker endpoint created and model deployed. Application name is: \\n'+str(app_name))
except:
                    self.log.info('Issue creating the sagemaker endpoint application. Please check the connection and aws credentials.\\n')
else:
                self.log.info('A sagemaker application with the given endpoint name is already running. Please delete the old endpoint with the same name and retry.\\n')
elif (deploy_option.lower() == "delete"):
# import mlflow.sagemaker as mfs
# # region = 'ap-south-1'
# # app_name = 'aion-demo-app'
# mfs.delete(app_name=app_name,region_name=region, archive=False,synchronous=True, timeout_seconds=300)
# print("Mlflow sagemaker application endpoint is deleted....\\n")
# self.log.info('Mlflow sagemaker application endpoint is deleted, application name is: '+str(app_name))
pass
elif (deploy_option.lower() == "add"):
pass
elif (deploy_option.lower() == "replace"):
pass
else:
pass
return app_status
def mlflow2sagemaker_deploy(self):
self.log.info('<!------------- Inside AION mlops to sagemaker communication and deploy process. ---------------> ')
deploy_status=False
app_name = str(self.sm_app_name)
self.log.info('Sagemaker Application Name: '+str(app_name))
uri_mlflow=self.mlflowSetPath(self.sagemakerLogLocation)
mlflow.set_tracking_uri(uri_mlflow)
mlops_trackuri=mlflow.get_tracking_uri()
mlops_trackuri=str(mlops_trackuri)
self.log.info('mlops tracking uri: '+str(mlops_trackuri))
localhost_deploy=False
try:
#Loading aion model to deploy in sagemaker
mlflow.set_experiment(self.experiment_name)
self.log.info('Endpoint Name: '+str(self.experiment_name))
# Assume, the model already loaded from joblib in aionmlflow2smInterface.py file.
aionmodel2deploy=self.model
# run_id = None
# experiment_id=None
# Use the loaded pickled model to make predictions
# pred = knn_from_pickle.predict(X_test)
with mlflow.start_run(run_name='AIONMLOps') as run:
# aionmodel2deploy.fit(X_train, y_train)
# predictions = aionmodel2deploy.predict(X_test)
mlflow.sklearn.log_model(aionmodel2deploy, self.mlflow_modelname)
run_id = run.info.run_uuid
experiment_id = run.info.experiment_id
self.log.info('AION mlops experiment run_id: '+str(run_id))
self.log.info('AION mlops experiment experiment_id: '+str(experiment_id))
self.log.info('AION mlops experiment model_name: '+str(self.mlflow_modelname))
                artifact_uri = mlflow.get_artifact_uri()
# print("1.artifact_uri: \\n",artifact_uri)
mlflow.end_run()
#If we need, we can check the mlflow experiments.
# try:
# mlflow_client = mlflow.tracking.MlflowClient('./mlruns')
# exp_list = mlflow_client.list_experiments()
# except:
# pass
#print("mlflow exp_list: \\n",exp_list)
mlflow_modelname=str(self.mlflow_modelname)
mlops_trackuri=mlops_trackuri.replace('file:','')
mlops_trackuri=str(mlops_trackuri)
# mlflow_root_dir = os.getcwd()
mlflow_root_dir = None
try:
os.chdir(str(self.sagemakerLogLocation))
mlflow_root_dir = os.getcwd()
self.log.info('mlflow root dir: '+str(mlflow_root_dir))
except:
self.log.info("path issue.")
model_path = 'mlruns/%s/%s/artifacts/%s' % (experiment_id, run_id,self.mlflow_modelname)
# model_path=mlops_trackuri+'\\\\%s\\\\%s\\\\artifacts\\\\%s' % (experiment_id, run_id,mlflow_modelname)
self.log.info("local host aion mlops model_path is: "+str(model_path))
time.sleep(2)
#print("Environment variable setup in the current working dir for aws sagemaker cli connection... \\n")
self.log.info('Environment variable setup in the current working dir for aws sagemaker cli connection... \\n ')
AWS_ACCESS_KEY_ID=str(self.awsaccesskey_id)
AWS_SECRET_ACCESS_KEY=str(self.awssecretaccess_key)
AWS_SESSION_TOKEN=str(self.aws_session_token)
region = str(self.aws_region)
#Existing model deploy options
mlflowtosagemakerPushImageName=str(self.mlflowtosagemakerPushImageName)
mlflowtosagemakerdeployModeluri=str(self.mlflowtosagemakerdeployModeluri)
import subprocess
cmd = 'aws configure set region_name '+region
os.system(cmd)
cmd = 'aws configure set aws_access_key_id '+AWS_ACCESS_KEY_ID
os.system(cmd)
cmd = 'aws configure set aws_secret_access_key '+AWS_SECRET_ACCESS_KEY
os.system(cmd)
#Create a session for aws communication using aws boto3 lib
# s3_client = boto3.client('ecr',aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region)
# s3 = boto3.resource('ecr', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key= AWS_SECRET_ACCESS_KEY)
session = boto3.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region)
# session = boto3.session.Session(
# aws_access_key_id=AWS_ACCESS_KEY_ID,
# aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
# aws_session_token=AWS_SESSION_TOKEN
# )
# awsclient = session.resource('ecr')
# s3 = session.resource('s3')
self.log.info('aws environment variable setup done... \\n')
try:
os.chdir(mlflow_root_dir)
except FileNotFoundError:
self.log.info('Directory does not exist. '+str(mlflow_root_dir))
except NotADirectoryError:
self.log.info('model_path is not a directory. '+str(mlflow_root_dir))
except PermissionError:
self.log.info('Issue in permissions to change to model dir. '+str(mlflow_root_dir))
mlflow_container_name=str(self.mlflow_container_name)
mlflow_version=mlflow.__version__
tag_id=mlflow_version
if (self.mlflowtosagemakerPushOnly.lower() == "true"):
self.log.info('Selected option is <Deploy existing model to sagemaker> \\n')
aws_id=str(self.aws_id)
arn=str(self.iam_sagemakerfullaccess_arn)
mlflow_image=mlflow_container_name+':'+tag_id
image_url = aws_id+'.dkr.ecr.'+region+'.amazonaws.com/'+mlflow_image
# print("image_url:========= \\n",image_url)
deploy_status=True
try:
model_path=mlflowtosagemakerdeployModeluri
# ##We need to run mlflow docker container command in the artifacts->model directory inside mlruns.
self.log.info('Deploy existing model container-Model path given by user: '+str(model_path))
try:
os.chdir(model_path)
except FileNotFoundError:
self.log.info('Directory does not exist. '+str(model_path))
except NotADirectoryError:
self.log.info('model_path is not a directory. '+str(model_path))
except PermissionError:
self.log.info('Issue in permissions to change to model dir. '+str(model_path))
try:
mfs.push_image_to_ecr(image=mlflowtosagemakerPushImageName)
deploy_status=True
self.log.info('AION mlops pushed the docker container to aws ecr. \\n ')
except:
self.log.info("error in pushing existing container to ecr.\\n")
deploy_status=False
time.sleep(2)
#Now,change the working dir to root dir,because now deploy needs full mlruns to model name dir.
try:
# print(" Changing directory to mlflow root dir....\\n")
os.chdir(mlflow_root_dir)
except FileNotFoundError:
self.log.info('model path is not a directory. '+str(mlflow_root_dir))
except NotADirectoryError:
self.log.info('model path is not a directory. '+str(mlflow_root_dir))
# print("{0} is not a directory".format(mlflow_root_dir))
except PermissionError:
                    self.log.info('Issue in permissions to change to model dir. '+str(mlflow_root_dir))
# self.deployModel2sagemaker(mlflowtosagemakerPushImageName,tag_id,mlflowtosagemakerdeployModeluri)
try:
if (deploy_status):
self.deployModel2sagemaker(mlflowtosagemakerPushImageName,tag_id,mlflowtosagemakerdeployModeluri)
                        self.log.info('AION created the docker container and pushed it to aws ecr. ')
time.sleep(2)
except:
self.log.info('AION deploy error.check connection and aws config parameters. ')
deploy_status=False
# self.log.info('model deployed in sagemaker. ')
except Exception as e:
self.log.info('AION mlops failed to push docker container in aws ecr, check configuration parameters. \\n'+str(e))
elif (self.mlflowtosagemakerPushOnly.lower() == "false"):
if (self.mlflowtosagemakerDeploy.lower() == "true"):
self.log.info('Selected option is <Create and Deploy model> \\n')
deploy_status=True
try:
# ##We need to run mlflow docker container command in the artifacts->model directory inside mlruns.
try:
os.chdir(model_path)
except FileNotFoundError:
self.log.info('Directory does not exist. '+str(model_path))
except NotADirectoryError:
self.log.info('model_path is not a directory. '+str(model_path))
except PermissionError:
self.log.info('Issue in permissions to change to model dir. '+str(model_path))
try:
mlflow_container_push=subprocess.run(['mlflow', 'sagemaker', 'build-and-push-container','--build','--push','--container',mlflow_container_name])
                        self.log.info('AION mlops created the docker container and pushed it to aws ecr. ')
deploy_status=True
time.sleep(2)
except:
                        self.log.info('Error pushing the aion model container to sagemaker. Please check the connection between the local host and the aws server.')
deploy_status=False
self.log.info('Now deploying the model container to sagemaker starts....\\n ')
# Once docker push completes, again going back to mlflow parent dir for deployment
#Now,change the working dir to root dir,because now deploy needs full mlruns to model name dir.
try:
os.chdir(mlflow_root_dir)
except FileNotFoundError:
self.log.info('model_path does not exist. '+str(mlflow_root_dir))
except NotADirectoryError:
self.log.info('model_path is not a directory. '+str(mlflow_root_dir))
except PermissionError:
self.log.info('Issue in permissions to change to model dir. '+str(mlflow_root_dir))
# app_name = str(self.sm_app_name)
try:
if (deploy_status):
self.deployModel2sagemaker(mlflow_container_name,tag_id,model_path)
except:
self.log.info('mlops deploy error.check connection')
deploy_status=False
except Exception as e:
exc = {"status":"FAIL","message":str(e).strip('"')}
out_exc = json.dumps(exc)
                    self.log.info('mlflow failed to create the docker container. Please check the aws iam/ecr permission setup and the aws access key, secret key values configured for aion.\\n')
elif(self.mlflowtosagemakerDeploy.lower() == "false"):
deploy_status=False
localhost_deploy=True
self.log.info('Selected option is <Create AION mlops container in local host .> \\n')
self.log.info("User selected create-Deploy sagemaker option as False,")
                self.log.info("Creating the AION mlops-sagemaker container locally; it will not be pushed to aws ecr or deployed in sagemaker. Check the container in the local docker repository. ")
try:
# ##We need to run AION mlops docker container command in the artifacts->model directory inside mlruns.
try:
os.chdir(model_path)
self.log.info('After change to AION mlops model dir, cwd: '+str(model_path))
except FileNotFoundError:
self.log.info('Directory does not exist. '+str(model_path))
except NotADirectoryError:
self.log.info('model_path is not a directory. '+str(model_path))
except PermissionError:
self.log.info('Issue in permissions to change to model dir. '+str(model_path))
# mlflow_container_local=subprocess.run(['AION mlops', 'sagemaker', 'build-and-push-container','--build','--no-push','--container',mlflow_container_name])
try:
if not (deploy_status):
mlflow_container_local=subprocess.run(['mlflow', 'sagemaker', 'build-and-push-container','--build','--no-push','--container',mlflow_container_name])
                            self.log.info('AION created a local host based docker container and pushed it to the local docker repository. Check with the <docker images> command.\\n ')
localhost_deploy=True
time.sleep(2)
except:
                        self.log.info('Error pushing the aion model container to sagemaker. Please check the connection between the local host and the aws server.')
deploy_status=False
localhost_deploy=False
# print("AION mlops creates docker container and push the container into aws ecr.\\n")
self.log.info('AION mlops creates docker container and stored locally... ')
time.sleep(2)
except Exception as e:
localhost_deploy=False
# print("mlflow failed to creates docker container please check the aws iam,ecr permission setup, aws id access_key,secret key values for aion.\\n")
                    self.log.info('AION mlops failed to create the docker container on the local machine.\\n'+str(e))
else:
self.log.info('Deploy option not selected, Please check. ')
localhost_deploy=False
deploy_status=False
else:
pass
localhost_container_status="Notdeployed"
mlflow2sm_deploy_status="Notdeployed"
if localhost_deploy:
localhost_container_status="success"
mlflow2sm_deploy_status="Notdeployed"
# print("AION creates local docker container successfully.Please check in docker repository.")
                self.log.info("AION created the local docker container successfully. Please check the local docker repository.")
# else:
# localhost_container_status="failed"
# # print("AION failed to create local docker container successfully.Please check in docker repository.")
# self.log.info("AION failed to create local docker container successfully.Please check in docker repository.")
if (deploy_status):
# Finally checking whether mlops model is deployed to sagemaker or not.
app_name = str(self.sm_app_name)
deploy_s = self.check_sm_deploy_status(app_name)
if (deploy_s == "InService"):
# print("AION mlops model is deployed at aws sagemaker, use application name(app_name) and region to access.\\n")
self.log.info('AION mlops model is deployed at aws sagemaker, use application name(app_name) and region to access.\\n'+str(app_name))
mlflow2sm_deploy_status="success"
localhost_container_status="Notdeployed"
else:
# print("AION Mlflow model not able to deploy at aws sagemaker\\n")
self.log.info('AION mlops model not able to deploy at aws sagemaker.\\n')
mlflow2sm_deploy_status="failed"
localhost_container_status="Notdeployed"
# else:
# mlflow2sm_deploy_status="None"
return mlflow2sm_deploy_status,localhost_container_status
except Exception as inst:
exc = {"status":"FAIL","message":str(inst).strip('"')}
out_exc = json.dumps(exc)
import json
import ast
import sys
import time
from pathlib import Path
import pandas as pd
from AION.llm import llm_utils
bench_mark_file = {'code':'code_eval.sh', 'doc': 'doc_eval.sh'}
DB_TABLE = 'llm_benchmarking'
def bench_mark(hypervisor,instanceid,model,usecaseid,eval='code'):
output = {}
started = False
if eval not in bench_mark_file.keys():
raise ValueError(f"Evaluation for '{eval}' is not supported.\\nSupported types are {list(bench_mark_file.keys())}")
db = benchmark_db( DB_TABLE, usecaseid)
db.update_state('running')
try:
server = llm_utils.hypervisor( hypervisor,instanceid)
if not server.is_already_running():
started, msg = server.start()
if not started:
raise ValueError( msg)
ssh = server.ssh_details()
pem_file = str(Path(__file__).parent/ssh['keyFilePath'])
sh_file = llm_utils.remote_code_dir(as_str=True) + '/' + bench_mark_file[eval]
cmd = sh_file + ' ' + usecaseid + ' '+ str(model)
print(cmd)
from llm.ssh_command import run_ssh_cmd
buf = run_ssh_cmd(server.ip, pem_file, ssh['userName'], '', '', cmd)
if isinstance( buf, str):
print( buf)
else:
print( buf.decode('utf-8'))
if buf:
if 'eval_output:' in buf:
output = buf.split('eval_output:')[-1].rstrip()
output = ast.literal_eval( output)
record = {}
record['state'] = 'Success'
record['eval_type'] = eval
record['result_type'] = 'value' if eval =='code' else 'dict'
record['result'] = output
db.put_record( record)
else:
record = {}
record['state'] = 'Error'
record['eval_type'] = eval
db.put_record( record)
return output
except Exception as e:
print(e)
record = {}
record['state'] = 'Error'
record['eval_type'] = eval
record['result_type'] = 'value' if eval =='code' else 'dict'
record['result'] = [{'error': str(e)}]
db.put_record( record)
output = {'status':'Error','msg':str(e)}
return output
class benchmark_db():
def __init__(self, table_name, usecaseid):
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
file_path = str(Path(DATA_DIR)/'sqlite')
self.sqlite_obj = sqlite_db(file_path,'config.db')
self.table_name = table_name
self.usecaseid = usecaseid
self.columns = ['usecaseid','state','eval_type','result_type','result']
self.sqlite_obj.create_table(self.table_name, self.columns, ['TEXT' for x in self.columns])
def put_record(self, record={}):
db_data = self.sqlite_obj.get_data(self.table_name,'usecaseid',self.usecaseid)
if (len(db_data) > 0):
self.sqlite_obj.update_dict_data(record,f'"usecaseid"="{self.usecaseid}"',self.table_name)
else:
data = {x:[str(record[x])] if x in record.keys() else [''] for x in self.columns}
data['usecaseid'] = self.usecaseid
self.sqlite_obj.write_data(pd.DataFrame.from_dict(data),self.table_name)
def update_state(self, state, error=None):
data = {x:'' for x in self.columns}
data['state'] = state
data['usecaseid'] = self.usecaseid
if error:
data['result'] = error
self.put_record( data)
def read_data(self):
return self.sqlite_obj.read_data(self.table_name)
if __name__ == '__main__':
run_code_benchmarking = False
if run_code_benchmarking:
#for code
bench_mark('aws','i-0c7bfeddd00658f45','CodeLLaMA-2-7B','AI0025_1',eval='code')
else:
# for document
bench_mark('aws','i-0c7bfeddd00658f45','LLaMA-2-7B','AI0041_1',eval='doc')
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
file_path = str(Path(DATA_DIR)/'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
data = sqlite_obj.read_data('llm_benchmarking')
print(data)
import json
import os
import sys
import pandas as pd
import time
from stat import S_ISDIR, S_ISREG
import paramiko
from pathlib import Path
import logging
import boto3
from botocore.exceptions import ClientError
import re
remote_data_dir = '/home/ubuntu/AION/data/storage'
remote_config_dir = '/home/ubuntu/AION/data/config'
running_state_code = 16
stopped_state_code = 80
sh_file_path = '/home/ubuntu/AION/llm/sbin/run_experiment.sh '
prompt_command = '/home/ubuntu/AION/llm/sbin/run_inference.sh'
def create_instance(image_id, instance_type, security_group_id,region,instance_name,aws_access_key_id,aws_secret_key):
try:
ec2 = boto3.client('ec2', aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_key, region_name=region)
response = ec2.run_instances(ImageId=image_id, InstanceType=instance_type, SecurityGroupIds=[security_group_id], MaxCount=1, MinCount=1, TagSpecifications=[{'ResourceType': 'instance', 'Tags': [{'Key': 'Name', 'Value': instance_name}]}])
#print('Instance ID:', response['Instances'][0]['InstanceId'])
return response['Instances'][0]['InstanceId'],''
except Exception as e:
print(e)
return '',str(e)
def check_instance(aws_access_key_id, aws_secret_key, instance_id,region):
    ip = ''
    ec2 = boto3.client('ec2', region_name=region, aws_access_key_id=aws_access_key_id,
                       aws_secret_access_key=aws_secret_key)
    response = ec2.describe_instance_status(InstanceIds=[instance_id],IncludeAllInstances=True)
    if response['InstanceStatuses'][0]['InstanceState']['Name'] == 'running':
        # describe_instance_status does not return the public IP, so look it up separately
        ip = get_instance_ip(aws_access_key_id, aws_secret_key, instance_id, region)
        return 'Running',ip
    else:
        return 'NotRunning',ip
def get_instance_ip(aws_access_key_id, aws_secret_key, instance_id,region):
try:
ec2 = boto3.client('ec2', region_name=region, aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_key)
response = ec2.describe_instances(InstanceIds=[instance_id])
ip = response['Reservations'][0]['Instances'][0]['PublicIpAddress']
return ip
except Exception as e:
print(e)
return ''
def start_instance(aws_access_key_id, aws_secret_key, instance_id,region):
ip = ''
ec2 = boto3.client('ec2', region_name=region, aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_key)
response = ec2.describe_instance_status(InstanceIds=[instance_id],IncludeAllInstances=True)
if response['InstanceStatuses'][0]['InstanceState']['Name'] == 'running':
print("Instance is already running")
try:
response = ec2.start_instances(InstanceIds=[instance_id], DryRun=False)
#print(response)
instance_status_code = 0
while instance_status_code != running_state_code:
response = ec2.describe_instances(InstanceIds=[instance_id])
instance_status_code = response['Reservations'][0]['Instances'][0]['State']['Code']
if instance_status_code == running_state_code:
ip = response['Reservations'][0]['Instances'][0]['PublicIpAddress']
break
except Exception as e:
print(e)
return 'Fail',str(e),''
return 'Success','Success',ip
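# Hedged usage sketch (credentials and ids below are assumed placeholders): start_instance
# returns a (status, message, public_ip) triple, so callers unpack all three values:
#   status, msg, ip = start_instance(access_key, secret_key, 'i-0abc1234567890def', 'ap-south-1')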
def is_running(instance_id,region,aws_access_key_id,aws_secret_key):
try:
ec2 = boto3.client('ec2', region_name=region, aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_key)
response = ec2.describe_instance_status(InstanceIds=[instance_id], IncludeAllInstances=True)
if 'Reservations' in response and len(response['Reservations']) > 0:
state = response['Reservations'][0]['Instances'][0]['State']['Name']
return state
elif 'InstanceStatuses' in response:
return response['InstanceStatuses'][0]['InstanceState']['Name']
else :
return 'failed'
except:
return "error"
def terminate_instance(instance_id,region,aws_access_key_id,aws_secret_key):
try:
ec2 = boto3.client('ec2', aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_key, region_name=region)
response = ec2.terminate_instances(InstanceIds=[instance_id])
return response['TerminatingInstances'][0]['InstanceId'],'success'
except Exception as e:
        print(e)
        return '','failed'
def copy_files_to_server(ip, pem_file,local_data_file_path,local_config_file_path,username):
try:
host = ip
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
while True:
try:
client.connect(host, username=username, key_filename=pem_file)
sftp = client.open_sftp()
break
except:
time.sleep(10)
try:
sftp.stat(remote_data_dir)
print(f"Path: '{remote_data_dir}' already exist")
except FileNotFoundError:
sftp.mkdir(remote_data_dir)
try:
sftp.stat(remote_config_dir)
print(f"Path: '{remote_config_dir}' already exist")
except FileNotFoundError:
sftp.mkdir(remote_config_dir)
Train_Data_InRemoteArtifacts = sftp.listdir(remote_data_dir)
for traindata_files in Train_Data_InRemoteArtifacts:
print(traindata_files)
if not traindata_files.endswith('.ipynb_checkpoints'):
sftp.remove(remote_data_dir +'/'+ traindata_files)
if os.path.isdir(local_data_file_path):
list_pdf_json_files = os.listdir(local_data_file_path)
for file_pdf_json in list_pdf_json_files:
sftp.put(local_data_file_path+'/'+file_pdf_json, remote_data_dir+'/'+ file_pdf_json)
print(file_pdf_json + " data copied successfully")
else:
filename = os.path.basename(local_data_file_path)
directoryname= os.path.dirname(local_data_file_path)
sftp.put(directoryname+'/'+filename, remote_data_dir+'/'+ filename)
print(filename + " data copied successfully")
config_InRemoteArtifacts = sftp.listdir(remote_config_dir)
for config_file in config_InRemoteArtifacts:
print(config_file)
if not config_file.endswith('.ipynb_checkpoints'):
sftp.remove(remote_config_dir +'/'+ config_file)
if local_config_file_path != '':
if os.path.isdir(local_config_file_path):
list_pdf_json_files = os.listdir(local_config_file_path)
for file_pdf_json in list_pdf_json_files:
sftp.put(local_config_file_path+'/'+file_pdf_json, remote_config_dir+'/'+ file_pdf_json)
print(file_pdf_json + " config copied successfully")
else:
# updated_config_file_path = create_config(local_config_file_path)
filename = os.path.basename(local_config_file_path)
directoryname= os.path.dirname(local_config_file_path)
sftp.put(directoryname+'/'+filename, remote_config_dir+'/'+ 'config.json')
print(filename + " config copied successfully")
remote_files = sftp.listdir(remote_config_dir)
print("List of uploaded files",remote_files)
sftp.close()
client.close()
except Exception as e:
print(e)
def check_status(ip,username,pem_file):
logoutput = read_log_file(ip,username,pem_file)
if "aion_llmfinetuning_Status" in logoutput:
return True
else:
return False
def read_log_file(ip,username,pem_file):
host = ip
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(host, username=username, key_filename=pem_file)
log_file_path = '/home/ubuntu/AION/data/log/fine_tuning_log.log'
stdin, stdout, stderr = client.exec_command(f'tail -n 500 {log_file_path}')
time.sleep(5)
client.close()
return stdout.read().decode()
def run_ssh_cmd(ip,pem_file,username,log,command):
try:
buf = ''
host = ip
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
while True:
try:
client.connect(host, username=username, key_filename=pem_file)
break
except:
time.sleep(10)
stdin, stdout, stderr =client.exec_command(command)
for line in stdout:
if log != '':
log.info(line.strip())
else:
if buf != '':
buf= buf+'\\n'
buf = buf+line.strip()
print(buf)
client.close()
return buf
except Exception as e:
print(str(e))
raise Exception(str(e))
def start_logging(deployFolder,modelName,version):
try:
deployLocation = Path(deployFolder)/modelName/str(version)/'log'
deployLocation.mkdir(parents=True, exist_ok=True)
name = 'model_training_logs.log'
filehandler = logging.FileHandler(deployLocation/name, 'w','utf-8')
log = logging.getLogger('log_llm')
log.propagate = False
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
log.removeHandler(hdlr)
log.addHandler(filehandler)
log.setLevel(logging.INFO)
return log
except Exception as e:
print(str(e))
def update_sqllite_data(usecaseid,variable,variable_value):
try:
print(usecaseid)
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
if sqlite_obj.table_exists("LLMTuning"):
data = sqlite_obj.get_data('LLMTuning','usecaseid',usecaseid)
if (len(data) > 0):
sqlite_obj.update_data('"'+variable+'"="'+variable_value+'"','"usecaseid"="'+str(usecaseid)+'"','LLMTuning')
return('Success')
data = dict(usecaseid=usecaseid,ip='',instance='',hypervisor='AWS',status='NA')
data.update({variable:variable_value})
df = pd.DataFrame(data, index=[0])
sqlite_obj.write_data(df,'LLMTuning')
return('Success')
except Exception as e:
print(e)
return('Error')
def LLM_predict(cloudconfig,instanceid,promptfile):
with open(cloudconfig, 'r') as config_f:
cloud_infra = json.load(config_f)
config_f.close()
aws_access_key_id = cloud_infra['AWS_EC2']['AWSAccessKeyID']
aws_secret_key = cloud_infra['AWS_EC2']['AWSSecretAccessKey']
region = cloud_infra['AWS_EC2']['LLaMa7B']['RegionName']
    # start_instance returns (status, message, public_ip)
    status, msg, ip = start_instance(aws_access_key_id, aws_secret_key, instanceid, region)
currentDirectory = os.path.dirname(os.path.abspath(__file__))
pem_file = os.path.join(currentDirectory,cloud_infra['AWS_EC2']['LLaMa7B']['ssh']['keyFilePath'])
username = cloud_infra['AWS_EC2']['LLaMa7B']['ssh']['userName']
copy_files_to_server(ip,pem_file,promptfile,'',username)
promptfile = os.path.basename(promptfile)
command = prompt_command+' '+remote_data_dir+'/'+ promptfile
buf = run_ssh_cmd(ip, pem_file, username,'',command)
return buf
def LLM_tuning_lemma7b(config,cloudconfig):
with open(config, 'r') as config_f:
config_data = json.load(config_f)
config_f.close()
modelid = config_data['basic']['modelName']+'_'+config_data['basic']['modelVersion']
log = start_logging(config_data['basic']['deployLocation'],config_data['basic']['modelName'],config_data['basic']['modelVersion'])
with open(cloudconfig, 'r') as config_f:
cloud_infra = json.load(config_f)
config_f.close()
currentDirectory = os.path.dirname(os.path.abspath(__file__))
aws_access_key_id = cloud_infra['AWS_EC2']['AWSAccessKeyID']
aws_secret_key = cloud_infra['AWS_EC2']['AWSSecretAccessKey']
instance_type = cloud_infra['AWS_EC2']['LLaMa7B']['InstanceSetting']['InstanceType']
security_group_id = cloud_infra['AWS_EC2']['LLaMa7B']['InstanceSetting']['SecurityGroupId']
region = cloud_infra['AWS_EC2']['LLaMa7B']['RegionName']
image_id = cloud_infra['AWS_EC2']['LLaMa7B']['amiId']
pem_file = os.path.join(currentDirectory,cloud_infra['AWS_EC2']['LLaMa7B']['ssh']['keyFilePath'])
username = cloud_infra['AWS_EC2']['LLaMa7B']['ssh']['userName']
datafile = config_data['basic']['dataLocation']
instance_name = config_data['basic']['modelName']+'_'+str(config_data['basic']['modelVersion'])+'_LLMTuning'
configfile = config
ip = cloud_infra['AWS_EC2']['LLaMa7B']['InstanceIP']
if image_id != '':
log.info("Status:-|... Create Instance")
        instance_id, msg = create_instance(image_id, instance_type, security_group_id, region, instance_name, aws_access_key_id, aws_secret_key)
elif cloud_infra['AWS_EC2']['LLaMa7B']['InstanceId'] != '':
instance_id = cloud_infra['AWS_EC2']['LLaMa7B']['InstanceId']
update_sqllite_data(modelid,'instance',instance_id)
print(instance_id)
else:
instance_id = ''
ip = cloud_infra['AWS_EC2']['LLaMa7B']['InstanceIP']
if instance_id != '':
log.info("Status:-|... Start Instance")
update_sqllite_data(modelid,'status','Initialize')
        # start_instance returns (status, message, public_ip)
        status, msg, ip = start_instance(aws_access_key_id, aws_secret_key, instance_id, region)
print(ip)
if ip != '':
update_sqllite_data(modelid,'ip',ip)
try:
log.info("Status:-|... Copy Files")
copy_files_to_server(ip,pem_file,datafile,configfile,username)
log.info("Status:-|... Start LLM Tuning")
update_sqllite_data(modelid,'status','Tuning')
run_ssh_cmd(ip, pem_file, username,log,sh_file_path)
log_data = read_log_file(ip,username,pem_file)
outputStr = re.search(r'aion_learner_status:(.*)', str(log_data), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
outputStr = json.loads(outputStr)
                from llm.llm_tuning import save_output
outputStr = save_output(config_data['basic']['deployLocation'],config_data['basic']['modelName'],config_data['basic']['modelVersion'],outputStr)
print(outputStr)
if "Tuning completed Successfully" in log_data:
update_sqllite_data(modelid,'status','Success')
output = json.dumps(outputStr)
print(f"\\naion_learner_status:{output}\\n")
return output
else:
update_sqllite_data(modelid,'status','Error')
output = json.dumps(outputStr)
print(f"\\naion_learner_status:{output}\\n")
return output
except Exception as e:
print(e)
log.info(str(e))
output = {'status':'FAIL','message':str(e),'LogFile':''}
output = json.dumps(output)
print(f"\\naion_learner_status:{output}\\n")
return output
else:
output = {'status':'FAIL','message':'Not Configured Properly','LogFile':''}
output = json.dumps(output)
print(f"\\naion_learner_status:{output}\\n")
return output
def stop_server_instance(aws_access_key_id, aws_secret_access_key, instance_id,region):
ec2 = boto3.client('ec2', region_name=region, aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key)
try:
ec2.stop_instances(InstanceIds=[instance_id, ], DryRun=True)
except ClientError as e:
if 'DryRunOperation' not in str(e):
raise
# Dry run succeeded, call stop_instances without dryrun
try:
response = ec2.stop_instances(InstanceIds=[instance_id], DryRun=False)
response = ec2.describe_instances(InstanceIds=[instance_id])
instance_status_code = 0
while instance_status_code != stopped_state_code:
response = ec2.describe_instances(InstanceIds=[instance_id])
instance_status_code = response['Reservations'][0]['Instances'][0]['State']['Code']
if instance_status_code == stopped_state_code:
print("Instance Stopped")
break
return "Process Completed"
except ClientError as e:
print(e)
return "Process failed"
if __name__ == "__main__":
    output = LLM_tuning_lemma7b(sys.argv[1],sys.argv[2])
    print(output)
def check_file_on_server(file_path, ip, pem_file):
is_wait = True
try:
host = ip
username = "ubuntu"
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(host, username=username, key_filename=pem_file)
sftp = client.open_sftp()
sftp.stat(file_path)
print("Model File created On Server")
is_wait = False
except IOError:
is_wait = True
print("Model training is in progress..")
return is_wait
def removeremotefolder_files(sftp, path):
try:
files = sftp.listdir(path=path)
for f in files:
filepath = path + "/" + f
print(filepath)
if isdir(sftp, filepath):
removeremotefolder_files(sftp, filepath)
else:
sftp.remove(filepath)
sftp.rmdir(path)
except IOError as e:
print(e)
def isdir(sftp, path):
try:
return S_ISDIR(sftp.stat(path).st_mode)
except IOError:
return False
def get_remote_folder(ip, remotedir, localdir, pem_file, preserve_mtime=False):
host = ip
username = "ubuntu"
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(host, username=username, key_filename=pem_file)
sftp = client.open_sftp()
for entry in sftp.listdir(remotedir):
remotepath = remotedir + "/" + entry
localpath = os.path.join(localdir, entry)
mode = sftp.stat(remotepath).st_mode
if S_ISDIR(mode):
try:
                os.mkdir(localpath, mode=0o777)
except OSError:
pass
get_remote_folder(ip, remotepath, localpath, pem_file, preserve_mtime)
elif S_ISREG(mode):
sftp.get(remotepath, localpath)
print("{} downloaded successfully".format(entry))
import json
import os
import sys
import pandas as pd
import time
from stat import S_ISDIR, S_ISREG
from pathlib import Path
import logging
import re
import tarfile
from llm import llm_utils
#remote_data_dir = '/home/ubuntu/AION/data/storage'
remote_data_rawdata_dir = '/home/aion/data/storage/raw_data'
remote_data_processeddata_dir = '/home/aion/data/storage/processed_data'
remote_config_dir = '/home/aion/data/config'
sh_file_path = '/home/aion/llm/sbin/llm_model_finetuning.sh'
unstructured_script_path = '/home/aion/llm/sbin/llm_model_finetuning.sh'
def start_logging(deployFolder,modelName,version):
try:
deployLocation = Path(deployFolder)/modelName/str(version)/'log'
deployLocation.mkdir(parents=True, exist_ok=True)
name = 'model_training_logs.log'
filehandler = logging.FileHandler(deployLocation/name, 'w','utf-8')
log = logging.getLogger('log_llm')
log.propagate = False
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
log.removeHandler(hdlr)
log.addHandler(filehandler)
log.setLevel(logging.INFO)
return log
except Exception as e:
print(str(e))
def update_sqllite_data(usecaseid,variable,variable_value):
try:
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
if sqlite_obj.table_exists("LLMTuning"):
column_names = sqlite_obj.column_names('LLMTuning')
#print(column_names)
if 'region' not in column_names:
query = 'Alter Table LLMTuning ADD region TEXT'
sqlite_obj.execute_query(query)
if 'image' not in column_names:
query = 'Alter Table LLMTuning ADD image TEXT'
sqlite_obj.execute_query(query)
data = sqlite_obj.get_data('LLMTuning','usecaseid',usecaseid)
if (len(data) > 0):
sqlite_obj.update_data('"'+variable+'"="'+variable_value+'"','"usecaseid"="'+str(usecaseid)+'"','LLMTuning')
return('Success')
data = dict(usecaseid=usecaseid,ip='',instance='',hypervisor='NA',status='NA',region='',image='')
data.update({variable:variable_value})
df = pd.DataFrame(data, index=[0])
sqlite_obj.write_data(df,'LLMTuning')
return('Success')
except Exception as e:
print(e)
return('Error')
def save_output(deployFolder,modelName,version,outputstr,hypervisor,instance):
try:
deployLocation = Path(deployFolder)/modelName/str(version)/'etc'
deployLocation.mkdir(parents=True, exist_ok=True)
name = 'output.json'
dpath = Path(deployFolder)/modelName/str(version)
outputstr['data']['deployLocation'] = str(dpath)
outputstr['data']['vmDetails'] = str(hypervisor)+' Instance: '+str(instance)
outputstr['data']['LogFile'] = str(dpath/'log'/'model_training_logs.log')
with open(deployLocation/name, 'w',encoding='utf-8') as f:
json.dump(outputstr, f)
f.close()
return (outputstr)
except Exception as e:
print(str(e))
print(outputstr)
def llm_logs(config,cloudconfig,instanceid,hypervisor,mlmodels):
try:
with open(config, 'r') as config_f:
config_data = json.load(config_f)
config_f.close()
modelid = config_data['basic']['modelName']+'_'+config_data['basic']['modelVersion']
from appbe.compute import readComputeConfig
cloud_infra = readComputeConfig()
if hypervisor == 'AWS':
aws_access_key_id = cloud_infra['awsCredentials']['accessKey']
aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey']
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], instanceid)
region = amiDetails['regionName']
from llm.aws_instance_api import check_instance
status,ip = check_instance(aws_access_key_id, aws_secret_key, instanceid,region)
if status.lower() == 'running':
currentDirectory = os.path.dirname(os.path.abspath(__file__))
pem_file = os.path.join(currentDirectory,amiDetails['ssh']['keyFilePath'])
username = amiDetails['ssh']['userName']
from llm.ssh_command import read_log_file
logs = read_log_file(ip,username,'',pem_file)
deployFolder = config_data['basic']['deployLocation']
modelName = config_data['basic']['modelName']
version = config_data['basic']['modelVersion']
deployLocation = Path(deployFolder)/modelName/str(version)/'log'
deployLocation.mkdir(parents=True, exist_ok=True)
name = 'model_training_logs.log'
with open(deployLocation/name, 'r+',encoding='utf-8') as f:
lines = [line.rstrip('\\n') for line in f]
for log in logs:
if log not in lines:
                            # insert at the top; otherwise use lines.append(log) to append at the end of the file.
lines.insert(0, log)
f.seek(0) # move to first position in the file, to overwrite !
f.write('\\n'.join(lines))
else:
status = {'status':'Error','msg':'Instance not running'}
output = json.dumps(status)
deployFolder = config_data['basic']['deployLocation']
modelName = config_data['basic']['modelName']
version = config_data['basic']['modelVersion']
deployLocation = Path(deployFolder)/modelName/str(version)/'log'
deployLocation.mkdir(parents=True, exist_ok=True)
name = 'model_training_logs.log'
with open(deployLocation/name, 'r+',encoding='utf-8') as f:
f.write('aion_learner_status:'+str(output))
f.close()
else:
credentialsJson = cloud_infra['gcpCredentials']['gcpCredentials']
projectID = cloud_infra['gcpCredentials']['projectID']
amiDetails = getAMIDetails(cloud_infra['GCP']['instances'], instanceid)
zone = amiDetails['regionName']
            username = amiDetails['ssh']['userName']
currentDirectory = os.path.dirname(os.path.abspath(__file__))
pem_file = os.path.join(currentDirectory,amiDetails['ssh']['keyFilePath'])
password = ''
from llm.gcp_compute_api import check_instance
status,ip = check_instance(credentialsJson,projectID, zone, instanceid)
if status.lower() == 'running':
from llm.ssh_command import read_log_file
logs = read_log_file(ip,username,'',pem_file)
deployFolder = config_data['basic']['deployLocation']
modelName = config_data['basic']['modelName']
version = config_data['basic']['modelVersion']
deployLocation = Path(deployFolder)/modelName/str(version)/'log'
deployLocation.mkdir(parents=True, exist_ok=True)
name = 'model_training_logs.log'
with open(deployLocation/name, 'r+',encoding='utf-8') as f:
lines = [line.rstrip('\\n') for line in f]
for log in logs:
if log not in lines:
lines.insert(0, log)
f.seek(0) # move to first position in the file, to overwrite !
f.write('\\n'.join(lines))
else:
status = {'status':'Error','msg':'Instance not running'}
output = json.dumps(status)
deployFolder = config_data['basic']['deployLocation']
modelName = config_data['basic']['modelName']
version = config_data['basic']['modelVersion']
deployLocation = Path(deployFolder)/modelName/str(version)/'log'
deployLocation.mkdir(parents=True, exist_ok=True)
name = 'model_training_logs.log'
with open(deployLocation/name, 'r+',encoding='utf-8') as f:
f.write('aion_learner_status:'+str(output))
f.close()
except Exception as e:
print(e)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def tardirectory(path,tarfilepath,ext):
with tarfile.open(tarfilepath, 'w:tar') as tarhandle:
for root, dirs, files in os.walk(path):
for f in files:
if ext == 'doc':
if f.endswith('.' + 'doc') or f.endswith('.' + 'docx'):
tarhandle.add(os.path.join(root, f), arcname=f)
else:
if f.endswith('.'+ext):
tarhandle.add(os.path.join(root, f),arcname=f)
tarhandle.close()
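# Illustrative sketch (not part of the original module): archives every '.pdf'
# file under a hypothetical local folder into a single tar file, mirroring how
# run() below packages unstructured training data before upload.
def _example_tardirectory():
    tardirectory('/tmp/raw_docs', '/tmp/raw_docs.tar', 'pdf')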
def getAMIDetails(config,selectedAMI):
y = {}
for x in config:
if x['id'] == selectedAMI:
return x
return y
def run(config):
with open(config, 'r') as config_f:
config_data = json.load(config_f)
config_f.close()
modelid = config_data['basic']['modelName']+'_'+config_data['basic']['modelVersion']
    log = start_logging(config_data['basic']['deployLocation'],config_data['basic']['modelName'],config_data['basic']['modelVersion'])
from appbe.compute import readComputeConfig
cloud_infra = readComputeConfig()
currentDirectory = os.path.dirname(os.path.abspath(__file__))
filetimestamp = str(int(time.time()))
instance_name = config_data['basic']['modelName']+'-'+str(config_data['basic']['modelVersion'])+'-LLM-'+filetimestamp
instance_name = instance_name.lower()
if config_data['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'True':
if os.path.isdir(config_data['basic']['dataLocation']):
from appbe.dataPath import DATA_FILE_PATH
filetimestamp = str(int(time.time()))
tarfilepath = os.path.join(DATA_FILE_PATH,filetimestamp+'.tar')
tardirectory(config_data['basic']['dataLocation'],tarfilepath,config_data['basic']['folderSettings']['fileExtension'])
config_data['basic']['dataLocationUnstructured'] = tarfilepath
with open(config, "w") as outfile:
json.dump(config_data, outfile)
outfile.close()
if cloud_infra['computeInfrastructure'] == 'GCP':
log.info("Status:-|... Compute Infrastructure GCP GCE")
credentialsJson = cloud_infra['gcpCredentials']['gcpCredentials']
#credentialsJson = "C:\\AION\\GCP-Instance-Utilityv2\\GCP-Instance-Utility\\ers-research.json"
selectedID = cloud_infra['gcpCredentials']['selectedID']
projectID = cloud_infra['gcpCredentials']['projectID']
zone = cloud_infra['gcpCredentials']['regionName']
selectMachineType = cloud_infra['gcpCredentials']['machineType']
if selectMachineType.lower() == 'image':
amiDetails = getAMIDetails(cloud_infra['GCP']['machineImage'],selectedID)
machineImageName = amiDetails['id']
else:
amiDetails = getAMIDetails(cloud_infra['GCP']['instances'], selectedID)
zone = amiDetails['regionName']
machineImageName = ''
instance_name = selectedID
pem_file = os.path.join(currentDirectory,amiDetails['ssh']['keyFilePath'])
username = amiDetails['ssh']['userName']
if machineImageName != '':
log.info("Status:-|... Create Instance Start")
try:
server = llm_utils.gcp_server("",machineImageName)
ip,msg = server.create(instance_name)
log.info("Status:-|... Create Instance End")
if ip == '':
if "resources available" in msg:
msg = "The respective zone (or region) does not have enough resources available to fulfill the request. Please try after some time."
output_json = {"status": "FAIL", "message": str(msg), "LogFile": ''}
output = json.dumps(output_json)
log.info("Status:-|... Failed to create the instance. "+str(msg))
print(f"\\naion_learner_status:{output}\\n")
return output
except Exception as e:
print(str(e))
output_json = {"status":"FAIL","message":'Failed to create the instance.',"LogFile":''}
output = json.dumps(output_json)
log.info("Status:-|... Failed to create the instance.")
print(f"\\naion_learner_status:{output}\\n")
return output
else:
server = llm_utils.gcp_server(instance_name, "")
server.start()
ip = server.ip
if ip != '':
time.sleep(20)
if selectMachineType.lower() == 'machineimage':
update_sqllite_data(modelid, 'image', machineImageName)
update_sqllite_data(modelid,'hypervisor','GCP')
update_sqllite_data(modelid, 'region', zone)
update_sqllite_data(modelid,'ip',ip)
update_sqllite_data(modelid,'instance',instance_name)
from llm.ssh_command import copy_files_to_server
if config_data['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'True':
datafile = config_data['basic']['dataLocationUnstructured']
else:
datafile = config_data['basic']['dataLocation']
log.info("Status:-|... Upload tuning data Start")
try:
if config_data['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'True':
copy_files_to_server(ip,pem_file,datafile,config,username,'',remote_data_rawdata_dir,remote_config_dir)
else:
copy_files_to_server(ip, pem_file, datafile, config, username,'', remote_data_processeddata_dir,remote_config_dir)
time.sleep(20)
log.info("Status:-|... Upload tuning data End")
log.info("Status:-|... Start LLM Tuning")
update_sqllite_data(modelid,'status','Tuning')
from llm.ssh_command import run_ssh_cmd
if config_data['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'True':
script_path = unstructured_script_path
else:
script_path = sh_file_path
print(script_path)
run_ssh_cmd(ip,pem_file, username,'',log,script_path)
from llm.ssh_command import read_log_file
log_data = read_log_file(ip,username,'',pem_file)
outputStr = re.search(r'aion_learner_status:(.*)', str(log_data), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
outputStr = json.loads(outputStr)
outputStr = save_output(config_data['basic']['deployLocation'],config_data['basic']['modelName'],config_data['basic']['modelVersion'],outputStr,'GCP GCE',instance_name)
vmRunning = config_data['basic'].get('vmRunning','KeepRunning')
if vmRunning.lower() != 'keeprunning':
from llm.gcp_compute_api import stop_instance
server.stop()
if "Tuning Completed Successfully" in log_data:
update_sqllite_data(modelid,'status','Success')
output = json.dumps(outputStr)
print(f"\\naion_learner_status:{output}\\n")
return output
else:
server.showndown()
update_sqllite_data(modelid,'status','Error')
output = json.dumps(outputStr)
print(f"\\naion_learner_status:{output}\\n")
return output
except Exception as e:
print(e)
server.showndown()
output_json = {"status": "FAIL", "message": str(e), "LogFile": ''}
output = json.dumps(output_json)
log.info("Status:-|... " + str(e))
print(f"\\naion_learner_status:{output}\\n")
return output
else:
output_json = {"status":"FAIL","message":'Failed to initialize the instance',"LogFile":''}
output = json.dumps(output_json)
log.info("Status:-|... Failed to initialize the instance")
print(f"\\naion_learner_status:{output}\\n")
return output
elif cloud_infra['computeInfrastructure'] == 'AWS':
log.info("Status:-|... Compute Infrastructure AWS EC2")
aws_access_key_id = cloud_infra['awsCredentials']['accessKey']
aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey']
selectMachineType = cloud_infra['awsCredentials']['machineType']
selectedID = cloud_infra['awsCredentials']['selectedID']
region = cloud_infra['awsCredentials']['regionName']
if selectMachineType.lower() == 'ami':
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['amis'],selectedID)
instance_type = amiDetails['instanceSetting']['instanceType']
security_group_id = cloud_infra['awsCredentials']['securitygroupid']
else:
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], selectedID)
region = amiDetails['regionName']
#region = cloud_infra['AWS_EC2']['LLaMa7B']['RegionName']
image_id = amiDetails['id']
pem_file = os.path.join(currentDirectory,amiDetails['ssh']['keyFilePath'])
username = amiDetails['ssh']['userName']
if config_data['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'True':
datafile = config_data['basic']['dataLocationUnstructured']
else:
datafile = config_data['basic']['dataLocation']
if selectMachineType.lower() == 'ami':
log.info("Status:-|... Create Instance Start")
server = llm_utils.aws_server('', image_id)
instance_id,msg = server.create(instance_name)
if instance_id == '':
output_json = {"status":"FAIL","message":'Failed to initialize the instance. '+str(msg),"LogFile":''}
output = json.dumps(output_json)
log.info("Status:-|... Failed to initialize the instance")
print(f"\\naion_learner_status:{output}\\n")
log.info(f"\\naion_learner_status:{output}\\n")
return output
log.info("Status:-|... Create Instance End")
elif selectMachineType.lower() == 'instance':
instance_id = image_id
update_sqllite_data(modelid,'instance',instance_id)
server = llm_utils.aws_server( instance_id, '')
else:
output_json = {"status":"FAIL","message":'AMI is not configured',"LogFile":''}
output = json.dumps(output_json)
log.info("Status:-|... AMI is not configured")
print(f"\\naion_learner_status:{output}\\n")
log.info(f"\\naion_learner_status:{output}\\n")
return output
# instance_id = ''
# ip = cloud_infra['AWS_EC2']['LLaMa7B']['InstanceIP']
try:
from appbe.models import get_published_models
already_published,published_usecase = get_published_models(instance_id)
if already_published:
                Msg = f'Fine Tuned Model-{published_usecase} is already published on the same instance. Please unpublish that model to proceed.'
output_json = {"status":"Error","message":Msg,"LogFile":''}
output = json.dumps(output_json)
log.info("Status:-|... A Model is already Published at the same instance.")
print(f"\\naion_learner_status:{output}\\n")
log.info(f"\\naion_learner_status:{output}\\n")
return output
except Exception as e:
log.info(str(e))
print(str(e))
if instance_id != '':
log.info("Status:-|... Start Instance")
if selectMachineType.lower() == 'ami':
update_sqllite_data(modelid, 'image', image_id)
update_sqllite_data(modelid, 'region', region)
update_sqllite_data(modelid,'instance',instance_id)
update_sqllite_data(modelid,'hypervisor','AWS')
update_sqllite_data(modelid,'status','Initialize')
status,msg = server.start()
ip = server.ip
time.sleep(20)
if status and ip != '':
update_sqllite_data(modelid,'ip',ip)
try:
log.info("Status:-|... Copy Files")
from llm.ssh_command import copy_files_to_server
if config_data['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'True':
print(ip,pem_file,datafile,config,username,'',remote_data_rawdata_dir,remote_config_dir)
copy_files_to_server(ip,pem_file,datafile,config,username,'',remote_data_rawdata_dir,remote_config_dir)
else:
print(ip, pem_file, datafile, config, username, '', remote_data_processeddata_dir, remote_config_dir)
copy_files_to_server(ip, pem_file, datafile, config, username, '', remote_data_processeddata_dir,remote_config_dir)
time.sleep(20)
log.info("Status:-|... Start LLM Tuning")
update_sqllite_data(modelid,'status','Tuning')
from llm.ssh_command import run_ssh_cmd
if config_data['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'True':
script_path = unstructured_script_path
else:
script_path = sh_file_path
#print(script_path)
#sys.exit()
run_ssh_cmd(ip, pem_file, username,'',log,script_path)
from llm.ssh_command import read_log_file
log_data = read_log_file(ip,username,'',pem_file)
outputStr = re.search(r'aion_learner_status:(.*)', str(log_data), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
outputStr = json.loads(outputStr)
outputStr = save_output(config_data['basic']['deployLocation'],config_data['basic']['modelName'],config_data['basic']['modelVersion'],outputStr,'AWS EC2',instance_id)
vmRunning = config_data['basic'].get('vmRunning','KeepRunning')
if vmRunning.lower() != 'keeprunning':
server.stop()
if "Tuning Completed Successfully" in log_data:
update_sqllite_data(modelid,'status','Success')
output = json.dumps(outputStr)
print(f"\\naion_learner_status:{output}\\n")
log.info(f"\\naion_learner_status:{output}\\n")
return output
else:
server.showndown()
update_sqllite_data(modelid,'status','Error')
output = json.dumps(outputStr)
print(f"\\naion_learner_status:{output}\\n")
log.info(f"\\naion_learner_status:{output}\\n")
return output
except Exception as e:
print(e)
log.info(str(e))
server.showndown()
output = {'status': 'FAIL', 'message': str(e), 'LogFile': ''}
output = json.dumps(output)
print(f"\\naion_learner_status:{output}\\n")
log.info(f"\\naion_learner_status:{output}\\n")
return output
else:
output = {'status':'FAIL','message':msg,'LogFile':''}
output = json.dumps(output)
print(f"\\naion_learner_status:{output}\\n")
log.info(f"\\naion_learner_status |
:{output}\\n")
return output
else:
output_json = {"status":"FAIL","message":'Failed to initialize the instance',"LogFile":''}
output = json.dumps(output_json)
log.info("Status:-|... Failed to initialize the instance")
print(f"\\naion_learner_status:{output}\\n")
        return output
<s> import json
import os
import time
remote_data_dir = '/home/aion/data/storage/prompt'
remote_config_dir = '/home/aion/data/config'
prompt_command = '/home/aion/llm/sbin/llm_predict.sh'
command_prepare_model = '/home/aion/llm/sbin/llm_merge_weights.sh'
command_start_service = '/home/aion/llm/sbin/llm_publish_model.sh'
command_stop_service = 'publish.py'
from AION.llm import llm_utils
from pathlib import Path
def getAMIDetails(config,selectedAMI):
y = {}
for x in config:
print(x)
if x['id'] == selectedAMI:
return x
return y
def get_ip(cloudconfig,instanceid,hypervisor,region,image):
try:
# with open(cloudconfig, 'r') as config_f:
# cloud_infra = json.load(config_f)
# config_f.close()
from appbe.compute import readComputeConfig
cloud_infra = readComputeConfig()
if hypervisor == 'AWS':
aws_access_key_id = cloud_infra['awsCredentials']['accessKey']
aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey']
if image != '' and image != 'NA':
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['amis'], image)
else:
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], instanceid)
if region == '' or region == 'NA':
region = amiDetails['regionName']
from llm.aws_instance_api import get_instance_ip
return get_instance_ip(aws_access_key_id, aws_secret_key, instanceid,region)
elif hypervisor == 'GCP':
#print(hypervisor,instanceid)
server = llm_utils.hypervisor( hypervisor,instanceid)
if server.is_machine_running():
return server.ip
else:
return ''
except Exception as e:
print(e)
raise Exception
def kill_inference_server(cloudconfig,instanceid,hypervisor,region,image):
# with open(cloudconfig, 'r') as config_f:
# cloud_infra = json.load(config_f)
# config_f.close()
from appbe.compute import readComputeConfig
cloud_infra = readComputeConfig()
if hypervisor == 'AWS':
ip = get_ip(cloudconfig,instanceid,hypervisor,region,image)
if ip == '':
print("Machine is not running.")
else:
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], instanceid)
currentDirectory = os.path.dirname(os.path.abspath(__file__))
pem_file = os.path.join(currentDirectory,amiDetails['ssh']['keyFilePath'])
username = amiDetails['ssh']['userName']
command = 'pkill -f'+ ' '+command_stop_service
from llm.ssh_command import run_ssh_cmd
buf = run_ssh_cmd(ip, pem_file, username, '', '', command)
elif hypervisor == 'GCP':
server = llm_utils.hypervisor( hypervisor,instanceid)
if server.is_machine_running():
ssh = server.ssh_details()
pem_file = str(Path(__file__).parent/ssh['keyFilePath'])
from llm.ssh_command import run_ssh_cmd
command = 'pkill -f'+ ' '+command_stop_service
buf = run_ssh_cmd(server.ip, pem_file, ssh['userName'],'','',command)
else:
raise Exception("Error")
def LLM_publish(cloudconfig,instanceid,hypervisor,model,usecaseid,region,image):
from appbe.compute import readComputeConfig
cloud_infra = readComputeConfig()
# with open(cloudconfig, 'r') as config_f:
# cloud_infra = json.load(config_f)
# config_f.close()
if hypervisor == 'AWS':
aws_access_key_id = cloud_infra['awsCredentials']['accessKey']
aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey']
if image != '' and image != 'NA':
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['amis'], image)
else:
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], instanceid)
if region == '' or region == 'NA':
region = amiDetails['regionName']
from llm.aws_instance_api import start_instance
status,msg,ip = start_instance(aws_access_key_id, aws_secret_key, instanceid, region)
print(status,msg,ip)
if status.lower() == 'success':
currentDirectory = os.path.dirname(os.path.abspath(__file__))
pem_file = os.path.join(currentDirectory,amiDetails['ssh']['keyFilePath'])
username = amiDetails['ssh']['userName']
command = command_prepare_model + ' ' + usecaseid + ' '+ str(model)
print(command)
from llm.ssh_command import run_ssh_cmd
buf = run_ssh_cmd(ip, pem_file, username, '', '', command)
if "Error" in buf:
print("Error in Merging model")
raise Exception("Error in Merging model")
print("merging finished")
command = command_start_service+' '+ usecaseid
buf = run_ssh_cmd(ip, pem_file, username, '', '', command)
print("inference server running")
return buf
else:
print(msg)
return msg
elif hypervisor == 'GCP':
amiDetails = getAMIDetails(cloud_infra['GCP']['instances'], instanceid)
if region == '' or region == 'NA':
region = amiDetails['regionName']
server = llm_utils.hypervisor(hypervisor,instanceid)
if not server.is_machine_running():
started, msg = server.start()
if not started:
raise ValueError( msg)
ssh = server.ssh_details()
pem_file = str(Path(__file__).parent/ssh['keyFilePath'])
from llm.ssh_command import run_ssh_cmd
#print(model)
#print(usecaseid)
command = command_prepare_model + ' ' + usecaseid + ' '+ str(model)
buf = run_ssh_cmd(server.ip, pem_file, ssh['userName'], '', '', command)
if "Error" in buf:
print("Error in Merging model")
raise Exception("Error in Merging model")
#print("merging finished")
command = command_start_service+' '+ usecaseid
buf = run_ssh_cmd(server.ip, pem_file, ssh['userName'], '', '', command)
#print("inference server running")
return buf
else:
print("Not configured for gcp")
raise Exception("Eror")
def LLM_predict(cloudconfig,instanceid,promptfile,hypervisor,model,usecaseid,region,image,temperature,maxtokens,modelType):
from appbe.compute import readComputeConfig
cloud_infra = readComputeConfig()
try:
temperature = float(temperature)
except:
temperature = 0.4
try:
maxtokens = int(maxtokens)
except:
maxtokens = 2048
print("====")
print(float(temperature))
print("====")
if hypervisor == 'AWS':
aws_access_key_id = cloud_infra['awsCredentials']['accessKey']
aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey']
currentDirectory = os.path.dirname(os.path.abspath(__file__))
if image != '' and image != 'NA':
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['amis'], image)
else:
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], instanceid)
if region == '' or region == 'NA':
region = amiDetails['regionName']
from llm.aws_instance_api import start_instance
#print(aws_access_key_id, aws_secret_key, instanceid, region)
status,msg,ip = start_instance(aws_access_key_id, aws_secret_key, instanceid, region)
if status.lower() == 'success':
pem_file = os.path.join(currentDirectory,amiDetails['ssh']['keyFilePath'])
username = amiDetails['ssh']['userName']
from llm.ssh_command import copy_files_to_server
#print(ip,pem_file,promptfile,'',username,'',remote_data_dir,remote_config_dir)
copy_files_to_server(ip,pem_file,promptfile,'',username,'',remote_data_dir,remote_config_dir)
promptfile = os.path.basename(promptfile)
if modelType == 'BaseModel':
command = prompt_command + ' ' + 'BaseModel' + ' ' + remote_data_dir + '/' + promptfile + ' ' + str(
model) + ' ' + str(temperature) + ' ' + str(maxtokens)
else:
command = prompt_command+' '+usecaseid+' '+remote_data_dir+'/'+ promptfile+' '+str(model)+' '+str(temperature)+' '+str(maxtokens)
print(command)
from llm.ssh_command import run_ssh_cmd
buf = run_ssh_cmd(ip, pem_file, username,'','',command)
return buf
else:
return msg
else:
server = llm_utils.hypervisor( hypervisor,instanceid)
if not server.is_machine_running():
started, msg = server.start()
if not started:
raise ValueError( msg)
ssh = server.ssh_details()
pem_file = str(Path(__file__).parent/ssh['keyFilePath'])
from llm.ssh_command import copy_files_to_server
copy_files_to_server(server.ip,pem_file,promptfile,'',ssh['userName'],'',remote_data_dir,remote_config_dir)
promptfile = os.path.basename(promptfile)
if modelType == 'BaseModel':
command = prompt_command + ' ' + 'BaseModel' + ' ' + remote_data_dir + '/' + promptfile + ' ' + str(
model) + ' ' + str(temperature) + ' ' + str(maxtokens)
else:
command = prompt_command+' '+usecaseid+' '+remote_data_dir+'/'+ promptfile+' '+str(model)+' '+str(temperature)+' '+str(maxtokens)
#command = '/home/aion/llm/sbin/llm_model_finetuning.sh'
#print(command)
from llm.ssh_command import run_ssh_cmd
#print(ssh['userName'],pem_file)
buf = run_ssh_cmd(server.ip, pem_file, ssh['userName'],'','',command)
        return buf
<s> import json
import os
import time
from pathlib import Path
BASE_DIR = '/home/aion'
DATA_DIR = '/home/aion/data'
CONFIG_DIR = '/home/aion/data/config'
PROMPT_DATA_DIR = '/home/aion/data/prompt_data'
CODE_DIR = '/home/aion/llm/sbin'
def remote_base_dir(as_str=False):
if as_str:
return BASE_DIR
return Path( BASE_DIR)
def remote_data_dir(as_str=False):
if as_str:
return DATA_DIR
return Path( DATA_DIR)
def remote_config_dir(as_str=False):
if as_str:
return CONFIG_DIR
return Path( CONFIG_DIR)
def remote_code_dir(as_str=False):
if as_str:
return CODE_DIR
return Path( CODE_DIR)
def remote_prompt_data_dir(as_str=False):
    if as_str:
        return PROMPT_DATA_DIR
    return Path( PROMPT_DATA_DIR)
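# Illustrative sketch (not part of the original module): the remote_* helpers
# return either the raw string or a pathlib.Path over the fixed remote layout.
def _example_remote_dirs():
    assert remote_config_dir(as_str=True) == CONFIG_DIR
    remote_config_file = remote_config_dir() / 'config.json'   # pathlib.Path
    return str(remote_config_file)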
def get_ami_details(config,selectedAMI):
y = {}
for x in config:
if x['id'] == selectedAMI:
return x
return y
def get_ip(cloudconfig,instanceid,hypervisor,region,image):
from AION.appbe.compute import readComputeConfig
cloud_infra = readComputeConfig()
if hypervisor == 'AWS':
aws_access_key_id = cloud_infra['awsCredentials']['accessKey']
aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey']
        if image != '' and image != 'NA':
            amiDetails = get_ami_details(cloud_infra['AWS_EC2']['amis'], image)
        else:
            amiDetails = get_ami_details(cloud_infra['AWS_EC2']['instances'], instanceid)
if region == '' or region == 'NA':
region = amiDetails['regionName']
from llm.aws_instance_api import get_instance_ip
return get_instance_ip(aws_access_key_id, aws_secret_key, instanceid,region)
elif hypervisor == 'GCP':
credentialsJson = cloud_infra['gcpCredentials']['gcpCredentials']
        amiDetails = get_ami_details(cloud_infra['GCP']['instances'], instanceid)
zone = amiDetails['regionName']
projectID = cloud_infra['gcpCredentials']['projectID']
from llm.gcp_compute_api import check_instance
status,ip = check_instance(credentialsJson,projectID, zone, instanceid)
return ip
else:
raise ValueError(f"Given hypervisor '{hypervisor}' is not supported")
def hypervisor( hypervisor,instanceid='', image=''):
if not hypervisor:
raise ValueError('No hypervisor provided')
if hypervisor.lower() == 'aws':
return aws_server(instanceid, image)
elif hypervisor.lower() == 'gcp':
return gcp_server(instanceid, image)
else:
raise ValueError(f"Hyperviser '{hypervisor}' is not supported")
class gcp_server():
def __init__( self, instanceid='', image=''):
self.hypervisor_name = 'gcp'
from AION.appbe.compute import readComputeConfig
self.cloud_infra = readComputeConfig()
if image and image.lower() != 'na':
self.ami_details = get_ami_details(self.cloud_infra['GCP']['machineImage'], image)
self.instanceid = ''
elif instanceid and instanceid.lower() != 'na':
self.ami_details = get_ami_details( self.cloud_infra['GCP']['instances'], instanceid)
            self.instanceid = instanceid
else:
raise ValueError("Either provide 'image name' or 'instance id'")
self.credentialsJson = self.cloud_infra['gcpCredentials']['gcpCredentials']
self.projectID = self.cloud_infra['gcpCredentials']['projectID']
self.zone = self.ami_details['regionName']
        self.stopped = False
        self.ip = ''
        self.already_running = False
        self.created = False
def is_machine_running(self):
from llm.gcp_compute_api import check_instance
status,self.ip = check_instance(self.credentialsJson,self.projectID,self.zone,self.instanceid)
return 'running' == status.lower()
    def start(self):
        from AION.llm.gcp_compute_api import is_running
        from AION.llm.gcp_compute_api import check_instance
        from AION.llm.gcp_compute_api import start_instance
        status = is_running(self.credentialsJson, self.projectID, self.zone, self.instanceid).lower()
        if 'running' == status:
            status, self.ip = check_instance(self.credentialsJson, self.projectID, self.zone, self.instanceid)
            self.already_running = True
            return True, 'already running'
        else:
            status, self.ip = start_instance(self.credentialsJson, self.projectID, self.zone, self.instanceid)
            # start_instance returns the instance status ('RUNNING' on success), so check that instead of 'Success'
            return status.lower() == 'running', status
def create(self,key_name = None):
from AION.llm.gcp_compute_api import create_instance
machineImageName = self.ami_details['id']
machineImageProjectID = self.ami_details['machineImageProjectID']
self.ip,msg = create_instance(self.credentialsJson,self.projectID,self.zone,key_name,machineImageName,machineImageProjectID)
if self.ip != '':
self.created = True
return self.ip,msg
def stop(self):
if self.already_running or self.stopped:
return True
from AION.llm.gcp_compute_api import stop_server_instance
status = stop_server_instance(self.credentialsJson,self.projectID, self.zone, self.instanceid)
if status == 'Process Completed':
self.stopped = True
return True
return False
def showndown(self):
if self.created:
self.terminate()
else:
if self.already_running or self.stopped:
return True
from AION.llm.gcp_compute_api import stop_server_instance
status = stop_server_instance(self.credentialsJson,self.projectID, self.zone, self.instanceid)
if status == 'Process Completed':
self.stopped = True
return True
return False
def terminate(self):
from AION.llm.gcp_compute_api import terminate_instance
msg, status = terminate_instance(self.projectID, self.zone, self.instanceid)
print(msg)
return status == 'success'
def ssh_details(self):
return self.ami_details['ssh']
class aws_server():
def __init__(self, instanceid='', image='', boot_up_time=0):
self.hypervisor_name = 'aws'
from AION.appbe.compute import readComputeConfig
self.cloud_infra = readComputeConfig()
if image and image.lower() != 'na':
self.ami_details = get_ami_details(self.cloud_infra['AWS_EC2']['amis'], image)
self.instanceid = ''
self.machine_type = 'ami'
elif instanceid and instanceid.lower() != 'na':
self.ami_details = get_ami_details( self.cloud_infra['AWS_EC2']['instances'], instanceid)
self.instanceid = instanceid
self.machine_type = 'instance'
else:
raise ValueError("Either provide 'image name' or 'instance id'")
self.access_key = self.cloud_infra['awsCredentials']['accessKey']
self.secret_key = self.cloud_infra['awsCredentials']['secretAccessKey']
self.securitygroupid = self.cloud_infra['awsCredentials']['securitygroupid']
self.region = self.ami_details['regionName']
self.already_running = False
self.boot_up_time = boot_up_time
self.stopped = False
self.created = False
def is_already_running(self):
return self.already_running
def is_machine_running(self):
from AION.llm.aws_instance_api import is_running
status = is_running(self.instanceid, self.region, self.access_key, self.secret_key).lower()
return 'running' == status.lower()
def start(self):
from AION.llm.aws_instance_api import is_running
from AION.llm.aws_instance_api import get_instance_ip
from AION.llm.aws_instance_api import start_instance
status = is_running(self.instanceid, self.region, self.access_key, self.secret_key).lower()
if 'running' == status:
self.ip = get_instance_ip(self.access_key, self.secret_key, self.instanceid, self.region)
self.already_running = True
return True, 'already running'
else:
status, msg, self.ip = start_instance(self.access_key, self.secret_key, self.instanceid, self.region)
return status == 'Success', msg
def create(self, key_name=None):
from AION.llm.aws_instance_api import create_instance
image_id = self.ami_details['id']
security_group_ids = self.ami_details['instanceSetting']['securityGroupIds']
if not key_name:
key_name = self.ami_details['instanceSetting']['keyName']
instance_type = self.ami_details['instanceSetting']['instanceType']
self.instanceid,msg = create_instance(image_id, instance_type, self.securitygroupid, self.region, key_name,
self.access_key, self.secret_key)
if self.instanceid != '':
self.created = True
return self.instanceid,msg
def showndown(self):
from AION.llm.aws_instance_api import stop_server_instance
if self.created:
return self.terminate()
else:
if self.already_running or self.stopped:
return True
status = stop_server_instance(self.access_key, self.secret_key, self.instanceid, self.region)
if status == 'Process Completed':
self.stopped = True
return True
return False
def stop(self):
from AION.llm.aws_instance_api import stop_server_instance
if self.already_running or self.stopped:
return True
status = stop_server_instance(self.access_key, self.secret_key, self.instanceid, self.region)
if status == 'Process Completed':
self.stopped = True
return True
return False
def terminate(self):
from AION.llm.aws_instance_api import terminate_instance
msg, status = terminate_instance(self.instanceid, self.region, self.access_key, self.secret_key)
print(msg)
return status == 'success'
def ssh_details(self):
return self.ami_details['ssh']
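# Illustrative sketch (not part of the original module): the hypervisor()
# factory returns a gcp_server or aws_server wrapper with a common interface
# (is_machine_running/start/stop/showndown/ssh_details). The instance id below
# is hypothetical and must resolve through readComputeConfig().
def _example_hypervisor_usage():
    server = hypervisor('AWS', instanceid='i-0123456789abcdef0')
    if not server.is_machine_running():
        started, msg = server.start()
        if not started:
            raise ValueError(msg)
    return server.ip, server.ssh_details()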
<s> from google.cloud import compute_v1
import os
PROJECT_ID = 'ers-research'
ZONE = 'us-west1-b'
INSTANCE_NAME = 'aion-llm-a100-vm1'
MACHINE_IMAGE_NAME = 'aion-40gb-a100-image'
MACHINE_IMAGE_PROJECT_ID = 'ers-research'
def create_instance(credentialsJson,project_id, zone, instance_name, machine_image_name, machine_image_project_id):
try:
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = credentialsJson
compute = compute_v1.InstancesClient()
machine_image = compute_v1.MachineImagesClient().get(project=machine_image_project_id, machine_image=machine_image_name)
instance = compute_v1.Instance()
instance.name = instance_name
instance.machine_type = f"zones/{zone}/machineTypes/a2-ultragpu-1g"
instance.source_machine_image = machine_image.self_link
boot_disk = compute_v1.AttachedDisk()
boot_disk.auto_delete = True
boot_disk.boot = True
instance.disks = [boot_disk]
network_interface = compute_v1.NetworkInterface()
access_config = compute_v1.AccessConfig()
access_config.type = "ONE_TO_ONE_NAT"
network_interface.access_configs = [access_config]
instance.network_interfaces = [network_interface]
operation = compute.insert(project=project_id, zone=zone, instance_resource=instance)
operation.result()
instance = compute.get(project=project_id, zone=zone, instance=instance_name)
print("--->instace created ")
print(instance.network_interfaces[0])
return instance.network_interfaces[0].access_configs[0].nat_i_p,''
except Exception as e:
print(e)
return '',str(e)
def is_running(credentialsJson,project_id, zone, instance_name):
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = credentialsJson
compute = compute_v1.InstancesClient()
instance = compute.get(project=project_id, zone=zone, instance=instance_name)
status = instance.status
return status
def check_instance(credentialsJson,project_id, zone, instance_name):
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = credentialsJson
compute = compute_v1.InstancesClient()
instance = compute.get(project=project_id, zone=zone, instance=instance_name)
status = instance.status
if status.lower() == 'running':
print(instance.network_interfaces[0].access_configs[0].nat_i_p)
ip = instance.network_interfaces[0].access_configs[0].nat_i_p
else:
ip = ''
return status,ip
def start_instance(credentialsJson,project_id, zone, instance_name):
try:
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = credentialsJson
compute = compute_v1.InstancesClient()
operation = compute.start(project=project_id, zone=zone, instance=instance_name)
operation.result()
instance = compute.get(project=project_id, zone=zone, instance=instance_name)
status = instance.status
if status.lower() == 'running':
print(instance.network_interfaces[0].access_configs[0])
ip = instance.network_interfaces[0].access_configs[0].nat_i_p
else:
ip = ''
except Exception as e:
print(e)
status = 'Error'
ip = ''
return status,ip
def stop_instance(credentialsJson,project_id, zone, instance_name):
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = credentialsJson
compute = compute_v1.InstancesClient()
operation = compute.stop(project=project_id, zone=zone, instance=instance_name)
operation.result()
def terminate_instance(project_id, zone, instance_name):
try:
compute = compute_v1.InstancesClient()
operation = compute.delete(project=project_id, zone=zone, instance=instance_name)
operation.result()
return "","suceess"
except Exception as e:
return str(e),"error"
# if __name__ == '__main__':
# ip_address = create_instance(PROJECT_ID, ZONE, INSTANCE_NAME, MACHINE_IMAGE_NAME, MACHINE_IMAGE_PROJECT_ID)
# print(f"IP address of the new VM: {ip_address}")
# #start_instance(PROJECT_ID, ZONE, INSTANCE_NAME)
# # stop_instance(PROJECT_ID, ZONE, INSTANCE_NAME)
# # terminate_instance(PROJECT_ID, ZONE, INSTANCE_NAME)
<s>
class CachePrompt:
tableName = 'cachePrompts'
def __init__(self, conn):
self.conn = conn
def selectFromCache(self,prompt ,usecaseId ,modelType,temperature=None ,max_token=None):
searchFromLLMFlag = False
try:
query = f'''SELECT * FROM {CachePrompt.tableName} WHERE usecaseId= "{usecaseId}" AND prompt = "{prompt}" AND modelType = "{modelType}"'''
if temperature:
query += f''' AND temperature= "{temperature}"'''
if max_token:
query += f''' AND max_token= "{max_token}"'''
cursor = self.conn.execute(query)
results = [x for x in cursor.fetchone()]
column_names = list(map(lambda x:x[0],cursor.description))
response = results[column_names.index('response')]
return searchFromLLMFlag,response
except Exception as e:
print(e)
searchFromLLMFlag = True
return searchFromLLMFlag,''
def deleteRecord(self ,usecaseId,max_records=5):
q_exitingRecords = f'''SELECT count(*) FROM {CachePrompt.tableName} WHERE usecaseId= "{usecaseId}" '''
cursor = self.conn.execute(q_exitingRecords)
numberOfRecords = cursor.fetchone()[0]
if numberOfRecords >= max_records:
idForDeletion = f'SELECT * FROM {CachePrompt.tableName} WHERE usecaseId= "{usecaseId}" ORDER BY created_at ASC LIMIT 1;'
cursor = self.conn.execute(idForDeletion)
id2del =[x[0] for x in cursor][0]
sql_delete_query = f"""DELETE from {CachePrompt.tableName} WHERE id = {id2del};"""
self.conn.execute(sql_delete_query)
self.conn.commit()
def insertRecord(self,prompt,response,usecaseId ,modelType,temperature=None ,max_token=None, max_records=5):
self.conn.execute('''CREATE TABLE IF NOT EXISTS cachePrompts
(ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
prompt TEXT NOT NULL,
context TEXT ,
usecaseId text NOT NULL,
modelType text NOT NULL,
temperature float NOT NULL,
max_token INT,
response TEXT NOT NULL,
created_at TEXT DEFAULT CURRENT_TIMESTAMP );''')
cur = self.conn.execute(f"select * from {CachePrompt.tableName}").fetchall()
print(cur)
self.deleteRecord(usecaseId,max_records=5)
self.conn.execute(f"INSERT INTO {CachePrompt.tableName} (prompt, usecaseId,modelType,temperature,max_token,response) \\
            VALUES (?, ?, ?,?, ?, ?)", (prompt, usecaseId,modelType,temperature, max_token, response));
self.conn.commit()
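# Illustrative sketch (not part of the original module): CachePrompt works on a
# DB-API connection such as sqlite3; insertRecord() creates the table on first
# use and selectFromCache() returns (needs_llm_call, cached_response). The
# prompt, response and usecase id below are hypothetical.
def _example_cacheprompt_usage():
    import sqlite3
    conn = sqlite3.connect(':memory:')
    cache = CachePrompt(conn)
    cache.insertRecord('Summarize the document', 'A short summary.', 'usecase_1', 'BaseModel', temperature=0.4, max_token=2048)
    needs_llm_call, response = cache.selectFromCache('Summarize the document', 'usecase_1', 'BaseModel')
    return needs_llm_call, response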
<s> import paramiko
from pathlib import Path
import logging
import json
import os
import sys
import pandas as pd
import time
import timeit
import re
running_state_code = 16
stopped_state_code = 80
#prompt_command = '/home/aion/AION/llm/sbin/run_inference.sh'
log_file_path = '/home/aion/data/log/fine_tuning_log.log'
def read_file_from_server(ip,username,password,pem_file,remote_file_name,localfilepath):
host = ip
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if pem_file != '':
client.connect(host, username=username, key_filename=pem_file)
else:
client.connect(host, username=username, password=password)
sftp = client.open_sftp()
sftp.get(remote_file_name,localfilepath)
sftp.close()
client.close()
def read_log_file(ip,username,password,pem_file):
host = ip
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if pem_file != '':
client.connect(host, username=username, key_filename=pem_file)
else:
client.connect(host, username=username, password=password)
#log_file_path = '/home/ubuntu/AION/data/log/fine_tuning_log.log'
stdin, stdout, stderr = client.exec_command(f'tail -n 500 {log_file_path}')
time.sleep(5)
client.close()
return stdout.read().decode()
def run_ssh_cmd(ip,pem_file,username,password,log,command):
try:
buf = ''
host = ip
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
startTime = timeit.default_timer()
while True:
try:
if pem_file != '':
client.connect(host, username=username, key_filename=pem_file)
else:
client.connect(host, username=username, password=password)
break
except Exception as e:
print(e)
dataLoadTime = timeit.default_timer() - startTime
if dataLoadTime >= 600:
break
time.sleep(10)
stdin, stdout, stderr =client.exec_command(command)
for line in stdout:
if log != '':
log.info(line)
else:
# if buf != '':
# buf= buf+'\\n'
buf = buf+line
client.close()
return buf
except Exception as e:
print(str(e))
raise Exception(str(e))
def copy_files_to_server(ip, pem_file,local_data_file_path,local_config_file_path,username,password,remote_data_dir,remote_config_dir):
try:
host = ip
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
startTime = timeit.default_timer()
while True:
try:
if pem_file != '':
client.connect(host, username=username, key_filename=pem_file)
elif password != '':
client.connect(host, username=username, password=password)
sftp = client.open_sftp()
break
except Exception as e:
print(e)
time.sleep(10)
dataLoadTime = timeit.default_timer() - startTime
if dataLoadTime >= 600:
break
try:
sftp.stat(remote_data_dir)
print(f"Path: '{remote_data_dir}' already exist")
except FileNotFoundError:
sftp.mkdir(remote_data_dir)
try:
sftp.stat(remote_config_dir)
print(f"Path: '{remote_config_dir}' already exist")
except FileNotFoundError:
sftp.mkdir(remote_config_dir)
Train_Data_InRemoteArtifacts = sftp.listdir(remote_data_dir)
for traindata_files in Train_Data_InRemoteArtifacts:
if not traindata_files.endswith('.ipynb_checkpoints'):
sftp.remove(remote_data_dir +'/'+ traindata_files)
if os.path.isdir(local_data_file_path):
list_pdf_json_files = os.listdir(local_data_file_path)
for file_pdf_json in list_pdf_json_files:
sftp.put(local_data_file_path+'/'+file_pdf_json, remote_data_dir+'/'+ file_pdf_json)
print(file_pdf_json + " data copied successfully")
else:
filename = os.path.basename(local_data_file_path)
directoryname= os.path.dirname(local_data_file_path)
sftp.put(directoryname+'/'+filename, remote_data_dir+'/'+ filename)
print(filename + " data copied successfully")
if local_config_file_path != '':
config_InRemoteArtifacts = sftp.listdir(remote_config_dir)
for config_file in config_InRemoteArtifacts:
print(config_file)
if not config_file.endswith('.ipynb_checkpoints'):
sftp.remove(remote_config_dir +'/'+ config_file)
if os.path.isdir(local_config_file_path):
list_pdf_json_files = os.listdir(local_config_file_path)
for file_pdf_json in list_pdf_json_files:
sftp.put(local_config_file_path+'/'+file_pdf_json, remote_config_dir+'/'+ file_pdf_json)
print(file_pdf_json + " config copied successfully")
else:
# updated_config_file_path = create_config(local_config_file_path)
filename = os.path.basename(local_config_file_path)
directoryname= os.path.dirname(local_config_file_path)
sftp.put(directoryname+'/'+filename, remote_config_dir+'/'+ 'config.json')
print(filename + " config copied successfully")
remote_files = sftp.listdir(remote_config_dir)
print("List of uploaded files",remote_files)
sftp.close()
client.close()
except Exception as e:
print(e)
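# Illustrative sketch (not part of the original module): a typical flow with
# the helpers above. Host, key path, user and script path are hypothetical;
# run_ssh_cmd streams stdout into the given logger or returns it when log is ''.
def _example_ssh_usage():
    ip = '10.0.0.12'
    pem_file = '/path/to/key.pem'
    copy_files_to_server(ip, pem_file, '/tmp/train.csv', '/tmp/config.json', 'aion', '', '/home/aion/data/storage/processed_data', '/home/aion/data/config')
    buf = run_ssh_cmd(ip, pem_file, 'aion', '', '', '/home/aion/llm/sbin/llm_model_finetuning.sh')
    logs = read_log_file(ip, 'aion', '', pem_file)
    return buf, logs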
<s> import json
import os
import sys
import pandas as pd
import time
from stat import S_ISDIR, S_ISREG
from pathlib import Path
import logging
import re
remote_data_dir = '/home/ubuntu/AION/data/storage'
remote_config_dir = '/home/ubuntu/AION/data/config'
sh_file_path = '/home/ubuntu/AION/llm/sbin/run_experiment.sh'
import os
import tarfile
def tardirectory(path,tarfilepath):
with tarfile.open(tarfilepath, 'w:tar') as tarhandle:
for root, dirs, files in os.walk(path):
for f in files:
tarhandle.add(os.path.join(root, f),arcname=f)
tarhandle.close()
def createCodeSummary(codedir,cloudconfig,filetype):
try:
from appbe.dataPath import DATA_FILE_PATH
filetimestamp = str(int(time.time()))
tarfilepath = os.path.join(DATA_FILE_PATH,filetimestamp+'.tar')
tardirectory(codedir,tarfilepath)
with open(cloudconfig, 'r') as config_f:
cloud_infra = json.load(config_f)
config_f.close()
aws_access_key_id = cloud_infra['AWS_EC2']['AWSAccessKeyID']
aws_secret_key = cloud_infra['AWS_EC2']['AWSSecretAccessKey']
instance_type = cloud_infra['AWS_EC2']['CodeSummarization']['InstanceSetting']['InstanceType']
security_group_id = cloud_infra['AWS_EC2']['CodeSummarization']['InstanceSetting']['SecurityGroupId']
region = cloud_infra['AWS_EC2']['CodeSummarization']['RegionName']
image_id = cloud_infra['AWS_EC2']['CodeSummarization']['amiId']
currentDirectory = os.path.dirname(os.path.abspath(__file__))
pem_file = os.path.join(currentDirectory,cloud_infra['AWS_EC2']['CodeSummarization']['ssh']['keyFilePath'])
username = cloud_infra['AWS_EC2']['CodeSummarization']['ssh']['userName']
instance_id = ''
        if image_id != '':
            from llm.aws_instance_api import create_instance
            # the original code referenced an undefined 'instance_name'; a generated name (hypothetical) is used here
            instance_name = 'aion-code-summarization-' + filetimestamp
            instance_id = create_instance(image_id, instance_type, security_group_id,region,instance_name,aws_access_key_id, aws_secret_key)
if instance_id == '':
return 'Failed','Instance Creation Failed'
if instance_id == '':
if cloud_infra['AWS_EC2']['CodeSummarization']['InstanceId'] != '':
instance_id = cloud_infra['AWS_EC2']['CodeSummarization']['InstanceId']
else:
return 'Failed','Instance Creation Failed.'
if instance_id != '':
from llm.aws_instance_api import start_instance
ip = start_instance(aws_access_key_id, aws_secret_key, instance_id,region)
if ip != '':
from llm.ssh_command import copy_files_to_server
copy_files_to_server(ip,pem_file,tarfilepath,'',username,'',remote_data_dir,remote_config_dir)
from llm.ssh_command import run_ssh_cmd
command = 'rm -r /home/ubuntu/AION/data/code'
buf = run_ssh_cmd(ip, pem_file, username,'','',command)
command = 'mkdir -p /home/ubuntu/AION/data/code'
buf = run_ssh_cmd(ip, pem_file, username,'','',command)
command = 'tar -xvf '+remote_data_dir+'/'+filetimestamp+'.tar -C /home/ubuntu/AION/data/code'
print(command)
buf = run_ssh_cmd(ip, pem_file, username,'','',command)
command = sh_file_path+' '+'/home/ubuntu/AION/data/code'+' '+filetype
print(command)
buf = run_ssh_cmd(ip, pem_file, username,'','',command)
from llm.ssh_command import read_file_from_server
filetimestamp = str(int(time.time()))
codesummar = os.path.join(DATA_FILE_PATH,filetimestamp+'.csv')
read_file_from_server(ip,username,'',pem_file,'/home/ubuntu/AION/data/storage/code_summararies.csv',codesummar)
return 'Success',codesummar
else:
return 'Failed','Instance Initialization Failed.'
else:
            return 'Failed','Instance Initialization Failed. AMI/Instance is not configured. Please check with ERS Research'
except Exception as e:
print(e)
        return 'Failed','Code Summarization Failed'
<s> from .genetic_optimization import GeneticOptimizationCV
<s> from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
import warnings
warnings.filterwarnings('ignore')
from genetic_selection import GeneticSelectionCV
def GeneticOptimizationCV(model,genetic_params,n_iter,scoring,cv):
n_generations = n_iter
estimator = model
selector = GeneticSelectionCV(estimator,cv=cv,**genetic_params,n_generations=n_generations,scoring=scoring)
return selector
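# Illustrative sketch (not part of the original module): wraps a classifier in
# GeneticSelectionCV based feature selection. The genetic_params values are
# hypothetical; call selector.fit(X, y) on the training data afterwards.
def _example_genetic_optimization():
    model = RandomForestClassifier(n_estimators=50)
    genetic_params = {'max_features': 5, 'n_population': 20}
    selector = GeneticOptimizationCV(model, genetic_params, n_iter=10, scoring='accuracy', cv=3)
    return selector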
<s> import pandas as pd
tab = ' '
VALID_AGGREGATION_METHODS = ['mean','sum']
VALID_GRANULARITY_UNITS = ['second','minute','hour','day','week','month','year']
VALID_INTERPOLATE_KWARGS = {'linear':{},'spline':{'order':5},'timebased':{}}
VALID_INTERPOLATE_METHODS = list( VALID_INTERPOLATE_KWARGS.keys())
def get_one_true_option(d, default_value=None):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
def get_boolean(value):
if (isinstance(value, str) and value.lower() == 'true') or (isinstance(value, bool) and value == True):
return True
else:
return False
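# Illustrative sketch (not part of the original module): how the two helpers
# above interpret the UI style config values.
def _example_option_helpers():
    assert get_one_true_option({'linear': 'False', 'spline': 'True'}) == 'spline'
    assert get_one_true_option({}, default_value='linear') == 'linear'
    assert get_boolean('True') is True and get_boolean('false') is False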
def get_source_delta( data: pd.DataFrame):
MAX_SAMPLE_TRY = 20
if len( data) <= 1:
return None
time_delta = data.index[-1] - data.index[-2]
count = {}
for i in range(len(data)):
        if i == MAX_SAMPLE_TRY or i == len(data) - 1:
break
delta = data.index[i+1] - data.index[i]
if delta not in count.keys():
count[delta] = 1
else:
count[delta] += 1
if count:
return max(count, key=count.get)
else:
return None
class timeSeries():
def __init__( self, config, datetime, log=None):
self.datetime = datetime
self.validate_config(config)
self.log = log
def validate_config( self, config):
if not self.datetime or self.datetime.lower() == 'na':
raise ValueError('date time feature is not provided')
self.config = {}
method = get_one_true_option(config.get('interpolation',None))
self.config['interpolate'] = {}
self.config['interpolate']['enabled'] = method in VALID_INTERPOLATE_METHODS
self.config['interpolate']['method'] = method
self.config['rolling'] = {}
self.config['rolling']['enabled'] = get_boolean( config.get('rollingWindow',False))
self.config['rolling']['size'] = int( config.get('rollingWindowSize',1))
if self.config['rolling']['size'] < 1:
raise ValueError('Rolling window size should be greater than 0.')
self.config['aggregation'] = {}
        aggregation = config.get('aggregation',{})
agg_method = get_one_true_option(aggregation['type'])
self.config['aggregation'] = {}
self.config['aggregation']['enabled'] = agg_method in VALID_AGGREGATION_METHODS
self.config['aggregation']['method'] = agg_method
granularity = aggregation.get('granularity',{})
granularity_unit = get_one_true_option( granularity.get('unit',None))
if granularity_unit in VALID_GRANULARITY_UNITS:
granularity_mapping = {'second':'S','minute':'Min','hour':'H','day':'D','week':'W','month':'M','year':'Y'}
size = int(granularity.get('size',10))
granularity_unit = granularity_mapping.get(granularity_unit,granularity_unit)
self.config['aggregation']['granularity'] = {}
self.config['aggregation']['granularity']['unit'] = granularity_unit
self.config['aggregation']['granularity']['size'] = size
def log_info(self, msg, type='info'):
if self.log:
if type == 'error':
self.log.error( msg)
else:
self.log.info( msg)
else:
print( msg)
def is_down_sampling(self, data, size, granularity_unit):
down_sampling = False
if granularity_unit in ['M', 'Y']:
return True
else:
target_delta = pd.Timedelta(size , granularity_unit)
source_delta = get_source_delta(data)
if not source_delta:
raise ValueError('Could not find the data frame time frequency')
return source_delta < target_delta
def run( self, data):
if self.datetime not in data.columns:
raise ValueError(f"Date time feature '{self.datetime}' is not present in data")
try:
# data[self.datetime] = pd.to_datetime( data[self.datetime])
##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle.
try:
#for non utc timestamp
data[self.datetime] = pd.to_datetime( data[self.datetime])
except:
#for utc timestamp
data[self.datetime] = pd.to_datetime( data[self.datetime],utc=True)
data.set_index( self.datetime, inplace=True)
except:
raise ValueError(f"can not convert '{self.datetime}' to dateTime")
if self.config.get('interpolate',{}).get('enabled',False):
method = self.config['interpolate']['method']
self.log_info(f"Applying Interpolation using {method}")
methods_mapping = {'timebased': 'time'}
self.config['interpolate']['mapped_method'] = methods_mapping.get(method, method)
data.interpolate(method=self.config['interpolate']['mapped_method'], inplace=True, **VALID_INTERPOLATE_KWARGS[method])
if self.config.get('rolling',{}).get('enabled',False):
if self.config['rolling']['size'] > len( data):
raise ValueError('Rolling window size is greater than dataset size')
self.log_info(f"Applying rolling window( moving avg) with size {self.config['rolling']['size']}")
data = data.rolling( self.config['rolling']['size']).mean()
data = data.iloc[self.config['rolling']['size'] - 1:]
aggregation = self.config.get('aggregation',{})
if aggregation.get('enabled',False):
method = aggregation.get('method','mean')
self.rule = str(aggregation['granularity']['size']) + aggregation['granularity']['unit']
if self.is_down_sampling(data, aggregation['granularity']['size'], aggregation['granularity']['unit']):
self.log_info(f"Applying down sampling( {self.rule})")
if method == 'mean':
data = data.resample( self.rule).mean()
elif method == 'sum':
data = data.resample( self.rule).sum()
else:
self.log_info(f"Applying up sampling using forward fill method( {self.rule})")
data = data.resample( self.rule).ffill()
data.reset_index( inplace=True, names=self.datetime)
return data
def get_code(self, indent=0):
tab = ' '
code = ''
code += f"""
def preprocess( data):
try:
#for non utc timestamp
data['{self.datetime}'] = pd.to_datetime( data['{self.datetime}'])
except:
data['{self.datetime}'] = pd.to_datetime( data['{self.datetime}'],utc=True)
data.set_index( '{self.datetime}', inplace=True)
"""
if self.config.get('interpolate',{}).get('enabled',False):
code += tab + f"data.interpolate(method='{self.config['interpolate']['mapped_method']}', inplace=True, **{VALID_INTERPOLATE_KWARGS[self.config['interpolate']['method']]})\\n"
if self.config.get('rolling',{}).get('enabled',False):
code += tab + f"data = data.rolling( {self.config['rolling']['size']}).mean().iloc[{self.config['rolling']['size'] - 1}:]\\n"
if self.config.get('aggregation',{}).get('enabled',False):
code += tab + f"data = data.resample( '{self.rule}').{self.config.get('aggregation',{}).get('method','mean')}()\\n"
code += tab + f"data.reset_index( inplace=True, names='{self.datetime}')\\n"
code += tab + "return data\\n"
return code
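# Illustrative sketch (not part of the original module): a minimal configuration
# accepted by timeSeries.validate_config(); the keys follow the checks above and
# the datetime column name 'timestamp' is hypothetical.
def _example_timeseries_usage(df):
    config = {
        'interpolation': {'linear': 'True'},
        'rollingWindow': 'False',
        'aggregation': {
            'type': {'mean': 'True'},
            'granularity': {'unit': {'hour': 'True'}, 'size': 1}
        }
    }
    ts = timeSeries(config, datetime='timestamp')
    return ts.run(df)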
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import numpy as np
import pandas as pd
import sys
import os
import warnings
import logging
from pathlib import Path
import random
from sklearn.model_selection import train_test_split
import operator
import re
import pdfplumber
class dataReader():
def __init__(self):
self.dataDf =None
self.log = logging.getLogger('eion')
def readCsv(self,dataPath,featureList,targetColumn):
data=pd.read_csv(dataPath)
dataDf=data[featureList]
predictDf=data[targetColumn]
return dataDf,predictDf
def rowsfilter(self,filters,dataframe):
self.log.info('\\n-------> No of rows before filtering: '+str(dataframe.shape[0])) #task-13479
filterexpression=''
firstexpressiondone = False
for x in filters:
if firstexpressiondone:
filterexpression += ' '
if x['combineOperator'].lower() == 'and':
filterexpression += '&'
elif x['combineOperator'].lower() == 'or':
filterexpression += '|'
filterexpression += ' '
firstexpressiondone = True
filterexpression += x['feature']
filterexpression += ' '
if x['condition'].lower() == 'equals':
filterexpression += '=='
elif x['condition'].lower() == 'notequals':
filterexpression += '!='
elif x['condition'].lower() == 'lessthan':
filterexpression += '<'
elif x['condition'].lower() == 'lessthanequalto':
filterexpression += '<='
elif x['condition'].lower() == 'greaterthan':
filterexpression += '>'
elif x['condition'].lower() == 'greaterthanequalto':
filterexpression += '>='
filterexpression += ' '
if dataframe[x['feature']].dtype in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']:
filterexpression += x['value']
else:
filterexpression += '\\''+x['value']+'\\''
dataframe = dataframe.query(filterexpression)
self.log.info('-------> Row filter: '+str(filterexpression)) #task-13479
self.log.info('-------> No of rows after filtering: '+str(dataframe.shape[0]))
return dataframe,filterexpression
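    # Illustrative sketch (not part of the original class): shape of the 'filters'
    # payload consumed by rowsfilter(); feature names and values are hypothetical.
    # filters = [
    #     {'feature': 'age', 'condition': 'greaterthan', 'value': '30', 'combineOperator': 'and'},
    #     {'feature': 'country', 'condition': 'equals', 'value': 'US', 'combineOperator': 'and'}
    # ]
    # filtered_df, expression = reader.rowsfilter(filters, dataframe)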
def grouping(self,grouper,dataframe):
grouperbyjson= {}
groupbyfeatures = grouper['groupby']
dataframe = dataframe.reset_index()
features = dataframe.columns.tolist()
aggjson = {}
for feature, featureType in zip(features,dataframe.dtypes):
if feature == groupbyfeatures or feature == 'index':
continue
if dataframe[feature].empty == True:
continue
if dataframe[feature].isnull().all() == True:
continue
if featureType in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']:
temp = {}
temp[feature+'_size'] = 'size'
temp[feature+'_sum'] = 'sum'
temp[feature+'_max'] = 'max'
temp[feature+'_min'] = 'min'
temp[feature+'_mean'] = 'mean'
aggjson[feature] = temp
else:
temp = {}
temp[feature+'_size'] = 'size'
temp[feature+'_unique'] = 'nunique'
aggjson[feature] = temp
groupbystring = 'groupby([\\''+groupbyfeatures+'\\']).agg('+str(aggjson)+')'
grouperbyjson['groupbystring'] = groupbystring
dataframe = dataframe.groupby([groupbyfeatures]).agg(aggjson)
dataframe.columns = dataframe.columns.droplevel(0)
dataframe = dataframe.reset_index()
'''
if operation.lower() == 'size':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).size()
elif operation.lower() == 'mean':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).mean()
elif operation.lower() == 'max':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).max()
elif operation.lower() == 'min':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).min()
dataframe = dataframe.rename("groupby_value")
dataframe = dataframe.to_frame()
dataframe = dataframe.reset_index()
'''
return dataframe,grouperbyjson
def timeGrouping(self,timegrouper,dataframe):
grouperbyjson= {}
dateTime = timegrouper['dateTime']
frequency = timegrouper['freq']
groupbyfeatures = timegrouper['groupby']
grouperbyjson['datetime'] = dateTime
if dataframe[dateTime].dtypes in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']:
dtlenth = dataframe[dateTime].iloc[0]
dtlenth = np.int64(dtlenth)
dtlenth = len(str(dtlenth))
if dtlenth == 13:
dataframe['date'] = pd.to_datetime(dataframe[dateTime],unit='ms')
grouperbyjson['unit'] = 'ms'
elif dtlenth == 10:
dataframe['date'] = pd.to_datetime(dataframe[dateTime],unit='s')
grouperbyjson['unit'] = 's'
else:
dataframe['date'] = pd.to_datetime(dataframe[dateTime])
grouperbyjson['unit'] = ''
else:
dataframe['date'] = pd.to_datetime(dataframe[dateTime])
grouperbyjson['unit'] = ''
dataframe = dataframe.reset_index()
dataframe.set_index('date',inplace=True)
features = dataframe.columns.tolist()
aggjson = {}
for feature, featureType in zip(features,dataframe.dtypes):
if feature == groupbyfeatures or feature == dateTime or feature == 'index':
continue
if dataframe[feature].empty == True:
continue
if dataframe[feature].isnull().all() == True:
continue
if featureType in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']:
temp = {'size','sum','max','min','mean'}
aggjson[feature] = temp
else:
temp = {'size','nunique'}
aggjson[feature] = temp
if groupbyfeatures == '':
groupbystring = 'groupby([pd.Grouper(freq=\\''+frequency+'\\')]).agg('+str(aggjson)+')'
else:
groupbystring = 'groupby([pd.Grouper(freq=\\''+frequency+'\\'),\\''+groupbyfeatures+'\\']).agg('+str(aggjson)+')'
grouperbyjson['groupbystring'] = groupbystring
print(grouperbyjson)
if groupbyfeatures == '':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency)]).agg(aggjson)
else:
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).agg(aggjson)
dataframe.columns = ['_'.join(col) for col in dataframe.columns]
dataframe = dataframe.reset_index()
self.log.info(dataframe.head(10))
'''
if operation.lower() == 'size':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).size()
elif operation.lower() == 'mean':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).mean()
elif operation.lower() == 'max':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).max()
elif operation.lower() == 'min':
            dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).min()
dataframe = dataframe.rename("groupby_value")
dataframe = dataframe.to_frame()
dataframe = dataframe.reset_index()
'''
return dataframe,grouperbyjson
def readDf(self,dataF,featureList,targetColumn):
dataDf = dataF[featureList]
predictDf =dataF[targetColumn]
return dataDf,predictDf
def csvTodf(self,dataPath,delimiter,textqualifier):
'''
if os.path.splitext(dataPath)[1] == ".tsv":
dataFrame=pd.read_csv(dataPath,encoding='latin1',sep='\\t')
else:
dataFrame=pd.read_csv(dataPath,encoding='latin1')
'''
if os.path.splitext(dataPath)[1] == ".py":
f = open(dataPath, "r")
pythoncode = f.read()
f.close()
ldict = {}
exec(pythoncode, globals(), ldict)
dataFrame = ldict['dfpy']
else:
dataFrame=pd.read_csv(dataPath,encoding='utf-8',sep=delimiter,quotechar=textqualifier, skipinitialspace = True,na_values=['-','?'],encoding_errors= 'replace')
dataFrame.rename(columns=lambda x: x.strip(), inplace=True)
return dataFrame
def read_file(self, fileName):
fileName = Path(fileName)
if fileName.suffix == '.pdf':
pdf = pdfplumber.open(fileName)
text = ''
for index, page in enumerate(pdf.pages):
if index:
text += ' '
text += page.extract_text()
else:
with open(fileName, "r",encoding="utf-8") as f:
text = f.read()
return text
def documentsTodf(self,folderlocation,labelFilePath):
dataDf = pd.DataFrame()
error_message = ""
dataset_csv_file = os.path.join(folderlocation,labelFilePath)
labels = pd.read_csv(dataset_csv_file)
dataDict = {}
keys = ["File","Label"]
for key in keys:
dataDict[key] = []
for i in range(len(labels)):
filename = os.path.join(folderlocation,labels.loc[i,"File"])
dataDict["File"].append(self.read_file(filename))
dataDict["Label"].append(labels.loc[i,"Label"])
dataDf = pd.DataFrame.from_dict(dataDict)
error_message = ""
return dataDf, error_message
def removeFeatures(self,df,datetimeFeature,indexFeature,modelFeatures,targetFeature):
self.log.info("\\n---------- Prepare Features ----------")
if(str(datetimeFeature).lower() != 'na'):
datetimeFeature = datetimeFeature.split(",")
datetimeFeature = list(map(str.strip, datetimeFeature))
for dtfeature in datetimeFeature:
if dtfeature in df.columns:
self.log.info("-------> Remove Date Time Feature: "+dtfeature)
df = df.drop(columns=dtfeature)
if(str(indexFeature).lower() != 'na'):
indexFeature = indexFeature.split(",")
indexFeature = list(map(str.strip, indexFeature))
for ifeature in indexFeature:
if ifeature in df.columns:
self.log.info("-------> Remove Index Feature: "+ifeature)
df = df.drop(columns=ifeature)
if(str(modelFeatures).lower() != 'na'):
self.log.info("-------> Model Features: "+str(modelFeatures))
modelFeatures = modelFeatures.split(",")
modelFeatures = list(map(str.strip, modelFeatures))
if(targetFeature != '' and str(targetFeature).lower() != 'na'):
targetFeature = targetFeature.split(",")
targetFeature = list(map(str.strip, targetFeature))
for ifeature in targetFeature:
if ifeature not in modelFeatures:
modelFeatures.append(ifeature)
if(str(indexFeature).lower() != 'na'):
for ifeature in indexFeature:
if ifeature in modelFeatures:
modelFeatures.remove(ifeature)
if(str(datetimeFeature).lower() != 'na'):
for dtfeature in datetimeFeature:
if dtfeature in modelFeatures:
modelFeatures.remove(dtfeature)
df = df[modelFeatures]
self.log.info("---------- Prepare Features End ----------")
return(df)
def splitImageDataset(self, df, ratio, modelType):
if modelType.lower() == "objectdetection":
images = df['File'].unique().tolist()
trainImages = random.sample(images, int(len(images) * ratio))
mask = [0] * len(df)
for i in range(len(df)):
mask[i] = df.iloc[i]['File'] in trainImages
trainDf = df.iloc[mask]
testDf = df.iloc[[not elem for elem in mask]]
return trainDf, testDf
else:
return train_test_split(df, test_size=(1 - ratio))
def createTFRecord(self, train_image_dir, output_dir, csv_file, testPercentage, AugEnabled,keepAugImages,operations, modelType,augConf={}):
from transformations import generate_tfrecord
from transformations.imageAug import ImageAugmentation
if isinstance(csv_file, pd.DataFrame):
df = csv_file
else:
df = pd.read_csv(os.path.join(train_image_dir,csv_file))
labelmap_path, num_classes = generate_tfrecord.createLabelFile(df, output_dir)
train_df, test_df = self.splitImageDataset(df, testPercentage/100.0, modelType)
if AugEnabled:
augFile = os.path.join(output_dir,"tempTrainDf.csv")
train_df.to_csv(augFile)
ia = ImageAugmentation(train_image_dir, augFile)
augFile = ia.augment(modelType, operations,None,augConf)
train_df = pd.read_csv(augFile)
generate_tfrecord.generate_TF_record(train_image_dir, output_dir, train_df, test_df, labelmap_path)
if AugEnabled and not keepAugImages:
ia.removeAugmentedImages(train_df)
return train_df, num_classes
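
# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of driving dataReader; 'sample.csv' and the column
# names ('age', 'salary', 'label') are assumptions for demonstration only.
if __name__ == '__main__':
    reader = dataReader()
    df = reader.csvTodf('sample.csv', delimiter=',', textqualifier='"')
    filters = [{'feature': 'age', 'condition': 'greaterthan', 'value': '30', 'combineOperator': 'and'}]
    df, expression = reader.rowsfilter(filters, df)
    featuresDf, targetDf = reader.readDf(df, ['age', 'salary'], 'label')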
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
#System imports
import os
import sys
import json
import datetime,time,timeit
import itertools
import numpy as np
import pandas as pd
import math
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.preprocessing import PowerTransformer
import logging
class dataTransformer():
def __init__(self):
self.log = logging.getLogger('eion')
def startTransformer(self,df,features,target,transType):
scaler ='None'
if target in features:
features.remove(target)
transFeatures=features
transDfColumns=[]
dataframe=df[transFeatures]
#targetArray=np.array(df[target])
#targetArray.shape = (len(targetArray), 1)
self.log.info("Data Normalization has started")
if transType.lower() =='standardscaler':
scaler = StandardScaler().fit(dataframe)
transDf = scaler.transform(dataframe)
elif transType.lower() =='minmax':
scaler=MinMaxScaler().fit(dataframe)
transDf = scaler.transform(dataframe)
elif transType.lower() =='lognormal':
print(dataframe)
scaler = PowerTransformer(method='yeo-johnson', standardize=False).fit(dataframe)
transDf = scaler.transform(dataframe)
else:
self.log.info("Need to implement")
#features.append(target)
#scaledDf = pd.DataFrame(np.hstack((transDf, targetArray)),columns=features)
        return transDf,features,scaler
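
# --- Hedged usage sketch (not part of the original module) ---
# Illustrates a possible call into dataTransformer on a toy frame with two
# numeric features and a 'target' column; all values are made up.
if __name__ == '__main__':
    sample = pd.DataFrame({'f1': [1.0, 2.0, 3.0], 'f2': [10.0, 20.0, 30.0], 'target': [0, 1, 0]})
    transDf, feats, scaler = dataTransformer().startTransformer(sample, ['f1', 'f2', 'target'], 'target', 'minmax')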
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import io
import json
import logging
import pandas as pd
import sys
import numpy as np
from pathlib import Path
from word2number import w2n
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.impute import SimpleImputer, KNNImputer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import FunctionTransformer
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.preprocessing import PowerTransformer
from sklearn.compose import ColumnTransformer
from sklearn.base import TransformerMixin
from sklearn.ensemble import IsolationForest
from category_encoders import TargetEncoder
try:
import transformations.data_profiler_functions as cs
except:
import data_profiler_functions as cs
if 'AION' in sys.modules:
try:
from appbe.app_config import DEBUG_ENABLED
except:
DEBUG_ENABLED = False
else:
DEBUG_ENABLED = False
log_suffix = f'[{Path(__file__).stem}] '
class profiler():
def __init__(self, xtrain, ytrain=None, target=None, encode_target = False, config={}, keep_unprocessed=[],data_path=None,log=None):
if not isinstance(xtrain, pd.DataFrame):
            raise ValueError(f'{log_suffix}supported data type is pandas.DataFrame but provided data is of {type(xtrain)} type')
if xtrain.empty:
raise ValueError(f'{log_suffix}Data frame is empty')
if target and target in xtrain.columns:
self.target = xtrain[target]
xtrain.drop(target, axis=1, inplace=True)
self.target_name = target
elif ytrain:
self.target = ytrain
self.target_name = 'target'
else:
self.target = pd.Series()
self.target_name = None
self.data_path = data_path
self.encode_target = encode_target
self.label_encoder = None
self.data = xtrain
self.keep_unprocessed = keep_unprocessed
self.colm_type = {}
for colm, infer_type in zip(self.data.columns, self.data.dtypes):
self.colm_type[colm] = infer_type
self.numeric_feature = []
self.cat_feature = []
self.text_feature = []
self.wordToNumericFeatures = []
self.added_features = []
self.pipeline = []
self.dropped_features = {}
self.train_features_type={}
self.__update_type()
self.config = config
self.featureDict = config.get('featureDict', [])
self.output_columns = []
self.feature_expender = []
self.text_to_num = {}
self.force_numeric_conv = []
if log:
self.log = log
else:
self.log = logging.getLogger('eion')
self.type_conversion = {}
self.log_input_feat_info()
def log_input_feat_info(self):
if self.featureDict:
feature_df = pd.DataFrame(self.featureDict)
log_text = '\\nPreprocessing options:'
log_text += '\\n\\t'+str(feature_df.head( len(self.featureDict))).replace('\\n','\\n\\t')
self.log.info(log_text)
def log_dataframe(self, msg=None):
buffer = io.StringIO()
self.data.info(buf=buffer)
if msg:
log_text = f'Data frame after {msg}:'
else:
log_text = 'Data frame:'
log_text += '\\n\\t'+str(self.data.head(2)).replace('\\n','\\n\\t')
log_text += ('\\n\\t' + buffer.getvalue().replace('\\n','\\n\\t'))
self.log.info(log_text)
def transform(self):
if self.is_target_available():
if self.target_name:
self.log.info(f"Target feature name: '{self.target_name}'")
self.log.info(f"Target feature size: {len(self.target)}")
else:
self.log.info(f"Target feature not present")
self.log_dataframe()
print(self.data.info())
try:
self.process()
except Exception as e:
self.log.error(e, exc_info=True)
raise
pipe = FeatureUnion(self.pipeline)
try:
            if self.text_feature:
from text.textProfiler import set_pretrained_model
set_pretrained_model(pipe)
conversion_method = self.get_conversion_method()
process_data = pipe.fit_transform(self.data, y=self.target)
# save for testing
if DEBUG_ENABLED:
if not isinstance(process_data, np.ndarray):
process_data = process_data.toarray()
df = pd.DataFrame(process_data)
df.to_csv('debug_preprocessed.csv', index=False)
if self.text_feature and conversion_method == 'latentsemanticanalysis':
n_size = self.get_tf_idf_output_size( pipe)
dimensions = self.get_tf_idf_dimensions()
                if n_size != dimensions:
dimensions = n_size
from sklearn.decomposition import TruncatedSVD
reducer = TruncatedSVD( n_components = dimensions)
reduced_data = reducer.fit_transform( process_data[:,-n_size:])
text_process_idx = [t[0] for t in pipe.transformer_list].index('text_process')
pipe.transformer_list[text_process_idx][1].steps.append(('feature_reducer',reducer))
if not isinstance(process_data, np.ndarray):
process_data = process_data.toarray()
process_data = np.concatenate((process_data[:,:-n_size], reduced_data), axis=1)
last_step = self.feature_expender.pop()
self.feature_expender.append({'feature_reducer':list(last_step.values())[0]})
except EOFError as e:
if "Compressed file ended before the end-of-stream marker was reached" in str(e):
raise EOFError('Pretrained model is not downloaded properly')
self.update_output_features_names(pipe)
if not isinstance(process_data, np.ndarray):
process_data = process_data.toarray()
df = pd.DataFrame(process_data, index=self.data.index, columns=self.output_columns)
if self.is_target_available() and self.target_name:
df[self.target_name] = self.target
if self.keep_unprocessed:
df[self.keep_unprocessed] = self.data[self.keep_unprocessed]
self.log_numerical_fill()
self.log_categorical_fill()
self.log_normalization()
return df, pipe, self.label_encoder
def log_type_conversion(self):
if self.log:
self.log.info('----------- Inspecting Features -----------')
self.log.info('----------- Type Conversion -----------')
count = 0
for k, v in self.type_conversion.items():
if v[0] != v[1]:
self.log.info(f'-------> {k} -> from {v[0]} to {v[1]} : {v[2]}')
self.log.info('Status:- |... Feature inspection done')
def check_config(self):
removeDuplicate = self.config.get('removeDuplicate', False)
self.config['removeDuplicate'] = cs.get_boolean(removeDuplicate)
self.config['misValueRatio'] = float(self.config.get('misValueRatio', cs.default_config['misValueRatio']))
self.config['numericFeatureRatio'] = float(self.config.get('numericFeatureRatio', cs.default_config['numericFeatureRatio']))
self.config['categoryMaxLabel'] = int(self.config.get('categoryMaxLabel', cs.default_config['categoryMaxLabel']))
featureDict = self.config.get('featureDict', [])
if isinstance(featureDict, dict):
self.config['featureDict'] = []
if isinstance(featureDict, str):
self.config['featureDict'] = []
def process(self):
#remove duplicate not required at the time of prediction
self.check_config()
self.remove_constant_feature()
self.remove_empty_feature(self.config['misValueRatio'])
self.remove_index_features()
self.dropna()
if self.config['removeDuplicate']:
self.drop_duplicate()
#self.check_categorical_features()
#self.string_to_numeric()
self.process_target()
self.train_features_type = {k:v for k,v in zip(self.data.columns, self.data.dtypes)}
self.parse_process_step_config()
self.process_drop_fillna()
self.log_type_conversion()
self.update_num_fill_dict()
if DEBUG_ENABLED:
print(self.num_fill_method_dict)
self.update_cat_fill_dict()
self.create_pipeline()
self.text_pipeline(self.config)
self.apply_outlier()
if DEBUG_ENABLED:
self.log.info(self.process_method)
self.log.info(self.pipeline)
def is_target_available(self):
return (isinstance(self.target, pd.Series) and not self.target.empty) or len(self.target)
def process_target(self, operation='encode', arg=None):
if self.is_target_available():
# drop null values
self.__update_index( self.target.notna(), 'target')
if self.encode_target:
self.label_encoder = LabelEncoder()
self.target = self.label_encoder.fit_transform(self.target)
return self.label_encoder
return None
def is_target_column(self, column):
return column == self.target_name
def fill_default_steps(self):
num_fill_method = cs.get_one_true_option(self.config.get('numericalFillMethod',{}))
normalization_method = cs.get_one_true_option(self.config.get('normalization',{}),'none')
for colm in self.numeric_feature:
if num_fill_method:
self.fill_missing_value_method(colm, num_fill_method.lower())
if normalization_method:
self.fill_normalizer_method(colm, normalization_method.lower())
cat_fill_method = cs.get_one_true_option(self.config.get('categoricalFillMethod',{}))
cat_encode_method = cs.get_one_true_option(self.config.get('categoryEncoding',{}))
for colm in self.cat_feature:
if cat_fill_method:
self.fill_missing_value_method(colm, cat_fill_method.lower())
if cat_encode_method:
self.fill_encoder_value_method(colm, cat_encode_method.lower(), default=True)
def parse_process_step_config(self):
self.process_method = {}
user_provided_data_type = {}
for feat_conf in self.featureDict:
colm = feat_conf.get('feature', '')
if not self.is_target_column(colm):
if colm in self.data.columns:
user_provided_data_type[colm] = feat_conf['type']
if user_provided_data_type:
self.update_user_provided_type(user_provided_data_type)
self.fill_default_steps()
for feat_conf in self.featureDict:
colm = feat_conf.get('feature', '')
if not self.is_target_column(colm):
if colm in self.data.columns:
if feat_conf.get('fillMethod', None):
self.fill_missing_value_method(colm, feat_conf['fillMethod'].lower())
if feat_conf.get('categoryEncoding', None):
self.fill_encoder_value_method(colm, feat_conf['categoryEncoding'].lower())
if feat_conf.get('normalization', None):
self.fill_normalizer_method(colm, feat_conf['normalization'].lower())
if feat_conf.get('outlier', None):
self.fill_outlier_method(colm, feat_conf['outlier'].lower())
if feat_conf.get('outlierOperation', None):
self.fill_outlier_process(colm, feat_conf['outlierOperation'].lower())
def get_tf_idf_dimensions(self):
dim = cs.get_one_true_option(self.config.get('embeddingSize',{}).get('TF_IDF',{}), 'default')
return {'default': 300, '50d':50, '100d':100, '200d':200, '300d':300}[dim]
def get_tf_idf_output_size(self, pipe):
start_index = {}
for feat_expender in self.feature_expender:
if feat_expender:
step_name = list(feat_expender.keys())[0]
index = list(feat_expender.values())[0]
for transformer_step in pipe.transformer_list:
if transformer_step[1].steps[-1][0] in step_name:
start_index[index] = {transformer_step[1].steps[-1][0]: transformer_step[1].steps[-1][1].get_feature_names_out()}
if start_index:
for key,value in start_index.items():
for k,v in value.items():
if k == 'vectorizer':
return len(v)
return 0
def update_output_features_names(self, pipe):
columns = self.output_columns
start_index = {}
index_shifter = 0
for feat_expender in self.feature_expender:
if feat_expender:
step_name = list(feat_expender.keys())[0]
for key,value in start_index.items():
for k,v in value.items():
index_shifter += len(v)
index = list(feat_expender.values())[0]
for transformer_step in pipe.transformer_list:
if transformer_step[1].steps[-1][0] in step_name:
start_index[index + index_shifter] = {transformer_step[1].steps[-1][0]: transformer_step[1].steps[-1][1].get_feature_names_out()}
#print(start_index)
if start_index:
for key,value in start_index.items():
for k,v in value.items():
if k == 'vectorizer':
v = [f'{x}_vect' for x in v]
self.output_columns[key:key] = v
self.added_features = [*self.added_features, *v]
def text_pipeline(self, conf_json):
if self.text_feature:
from text.textProfiler import textProfiler
from text.textProfiler import textCombine
pipeList = []
text_pipe = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", self.text_feature)
], remainder="drop")),
("text_fillNa",SimpleImputer(strategy='constant', fill_value='')),
("merge_text_feature", textCombine())])
obj = textProfiler()
pipeList = obj.cleaner(conf_json, pipeList, self.data_path)
pipeList = obj.embedding(conf_json, pipeList)
last_step = "merge_text_feature"
for pipe_elem in pipeList:
text_pipe.steps.append((pipe_elem[0], pipe_elem[1]))
last_step = pipe_elem[0]
text_transformer = ('text_process', text_pipe)
self.pipeline.append(text_transformer)
self.feature_expender.append({last_step:len(self.output_columns)})
def create_pipeline(self):
num_pipe = {}
for k,v in self.num_fill_method_dict.items():
for k1,v1 in v.items():
if k1 and k1 != 'none':
num_pipe[f'{k}_{k1}'] = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", v1)
], remainder="drop")),
(k, self.get_num_imputer(k)),
(k1, self.get_num_scaler(k1))
])
else:
num_pipe[f'{k}_{k1}'] = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", v1)
], remainder="drop")),
(k, self.get_num_imputer(k))
])
self.output_columns.extend(v1)
cat_pipe = {}
for k,v in self.cat_fill_method_dict.items():
for k1,v1 in v.items():
cat_pipe[f'{k}_{k1}'] = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", v1)
], remainder="drop")),
(k, self.get_cat_imputer(k)),
(k1, self.get_cat_encoder(k1))
])
if k1 not in ['onehotencoding']:
self.output_columns.extend(v1)
else:
self.feature_expender.append({k1:len(self.output_columns)})
for key, pipe in num_pipe.items():
self.pipeline.append((key, pipe))
for key, pipe in cat_pipe.items():
self.pipeline.append((key, pipe))
"Drop: feature during training but replace with zero during prediction "
def process_drop_fillna(self):
drop_column = []
if 'numFill' in self.process_method.keys():
for col, method in self.process_method['numFill'].items():
if method == 'drop':
self.process_method['numFill'][col] = 'zero'
drop_column.append(col)
if 'catFill' in self.process_method.keys():
for col, method in self.process_method['catFill'].items():
if method == 'drop':
self.process_method['catFill'][col] = 'zero'
drop_column.append(col)
if drop_column:
self.data.dropna(subset=drop_column, inplace=True)
def update_num_fill_dict(self):
self.num_fill_method_dict = {}
if 'numFill' in self.process_method.keys():
for f in cs.supported_method['fillNa']['numeric']:
self.num_fill_method_dict[f] = {}
for en in cs.supported_method['normalization']:
self.num_fill_method_dict[f][en] = []
for col in self.numeric_feature:
numFillDict = self.process_method.get('numFill',{})
normalizationDict = self.process_method.get('normalization',{})
                        if f == numFillDict.get(col, '') and en == normalizationDict.get(col,''):
                            self.num_fill_method_dict[f][en].append(col)
                    if not self.num_fill_method_dict[f][en]:
                        del self.num_fill_method_dict[f][en]
if colm in self.cat_feature:
if method.lower() in cs.supported_method['categoryEncoding']:
if 'catEncoder' not in self.process_method.keys():
self.process_method['catEncoder'] = {}
if method == 'na' and self.process_method['catEncoder'].get(colm, None):
pass
else:
self.process_method['catEncoder'][colm] = self.check_encoding_method(method, colm,default)
else:
self.log.info(f"-------> categorical encoding method '{method}' is not supported. supported methods are {cs.supported_method['categoryEncoding']}")
def fill_normalizer_method(self,colm, method):
if colm in self.numeric_feature:
if method in cs.supported_method['normalization']:
if 'normalization' not in self.process_method.keys():
self.process_method['normalization'] = {}
if (method == 'na' or method == 'none') and self.process_method['normalization'].get(colm, None):
pass
else:
self.process_method['normalization'][colm] = method
else:
self.log.info(f"-------> Normalization method '{method}' is not supported. supported methods are {cs.supported_method['normalization']}")
def apply_outlier(self):
inlier_indice = np.array([True] * len(self.data))
if self.process_method.get('outlier', None):
self.log.info('-------> Feature wise outlier detection:')
for k,v in self.process_method['outlier'].items():
if k in self.numeric_feature:
if v == 'iqr':
index = cs.findiqrOutlier(self.data[k])
elif v == 'zscore':
index = cs.findzscoreOutlier(self.data[k])
elif v == 'disable':
index = None
if k in self.process_method['outlierOperation'].keys():
if self.process_method['outlierOperation'][k] == 'dropdata':
inlier_indice = np.logical_and(inlier_indice, index)
elif self.process_method['outlierOperation'][k] == 'average':
mean = self.data[k].mean()
index = ~index
self.data.loc[index,[k]] = mean
self.log.info(f'-------> {k}: Replaced by Mean {mean}: total replacement {index.sum()}')
elif self.process_method['outlierOperation'][k] == 'nochange' and v != 'disable':
self.log.info(f'-------> Total outliers in "{k}": {(~index).sum()}')
if self.config.get('outlierDetection',None):
if self.config['outlierDetection'].get('IsolationForest','False') == 'True':
if self.numeric_feature:
index = cs.findiforestOutlier(self.data[self.numeric_feature])
inlier_indice = np.logical_and(inlier_indice, index)
self.log.info(f'-------> Numeric feature based Outlier detection(IsolationForest):')
if inlier_indice.sum() != len(self.data):
self.__update_index(inlier_indice, 'outlier')
def fill_outlier_method(self,colm, method):
if colm in self.numeric_feature:
if method in cs.supported_method['outlier_column_wise']:
if 'outlier' not in self.process_method.keys():
self.process_method['outlier'] = {}
if method not in ['Disable', 'na']:
self.process_method['outlier'][colm] = method
else:
self.log.info(f"-------> outlier detection method '{method}' is not supported for column wise. supported methods are {cs.supported_method['outlier_column_wise']}")
def fill_outlier_process(self,colm, method):
if colm in self.numeric_feature:
if method in cs.supported_method['outlierOperation']:
if 'outlierOperation' not in self.process_method.keys():
self.process_method['outlierOperation'] = {}
self.process_method['outlierOperation'][colm] = method
else:
self.log.info(f"-------> outlier process method '{method}' is not supported for column wise. supported methods are {cs.supported_method['outlierOperation']}")
def get_cat_imputer(self,method):
if method == 'mode':
return SimpleImputer(strategy='most_frequent')
elif method == 'zero':
return SimpleImputer(strategy='constant', fill_value=0)
def get_cat_encoder(self,method):
if method == 'labelencoding':
return OrdinalEncoder()
elif method == 'onehotencoding':
return OneHotEncoder(sparse=False,handle_unknown="ignore")
elif method == 'targetencoding':
if not self.is_target_available():
raise ValueError('Can not apply Target Encoding when target feature is not present')
return TargetEncoder()
def get_num_imputer(self,method):
if method == 'mode':
return SimpleImputer(strategy='most_frequent')
elif method == 'mean':
return SimpleImputer(strategy='mean')
elif method == 'median':
return SimpleImputer(strategy='median')
elif method == 'knnimputer':
return KNNImputer()
elif method == 'zero':
return SimpleImputer(strategy='constant', fill_value=0)
def get_num_scaler(self,method):
if method == 'minmax':
return MinMaxScaler()
elif method == 'standardscaler':
return StandardScaler()
elif method == 'lognormal':
return PowerTransformer(method='yeo-johnson', standardize=False)
def recommenderStartProfiler(self,modelFeatures):
return cs.recommenderStartProfiler(self,modelFeatures)
def folderPreprocessing(self,folderlocation,folderdetails,deployLocation):
return cs.folderPreprocessing(self,folderlocation,folderdetails,deployLocation)
def textSimilarityStartProfiler(self, doc_col_1, doc_col_2):
return cs.textSimilarityStartProfiler(self, doc_col_1, doc_col_2)
def get_conversion_method(self):
return cs.get_one_true_option(self.config.get('textConversionMethod','')).lower()
def set_features(features,profiler=None):
return cs.set_features(features,profiler)
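
# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of the profiler entry point on a small numeric frame,
# assuming the complete transformations package (including the methods elided
# above) is importable; the column names, values and config keys below are
# illustrative assumptions.
if __name__ == '__main__':
    demo = pd.DataFrame({'age': [25, 32, None, 47],
                         'salary': [50.0, 64.5, 80.2, 75.0],
                         'label': [0, 1, 1, 0]})
    conf = {'numericalFillMethod': {'median': 'True'},
            'normalization': {'minmax': 'True'}}
    prof = profiler(demo, target='label', encode_target=True, config=conf)
    processed_df, fitted_pipe, label_enc = prof.transform()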
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import random
from matplotlib import pyplot as plt
import cv2
import albumentations as A
import os
import pandas as pd
from pathlib import Path
class ImageAugmentation():
def __init__(self, dataLocation, csvFile):
self.AugmentationOptions = {"Flip": {"operation": A.HorizontalFlip, "suffix":"_flip"},
"Rotate": {"operation": A.Rotate, "suffix":"_rotate"},
"Shift": {"operation": A.RGBShift, "suffix":"_shift"},
"Crop": {"operation": [A.CenterCrop, A.RandomSizedBBoxSafeCrop], "suffix":"_crop"},
"Contrast": {"operation": A.RandomContrast, "suffix":"_cont"},
"Brightness": {"operation": A.RandomBrightness, "suffix":"_bright"},
"Blur": {"operation": A.GaussianBlur, "suffix":"_blur"}
}
self.dataLocation = dataLocation
self.csvFile = csvFile
def __applyAugmentationClass(self, image, augmentation,limit):
if augmentation in list(self.AugmentationOptions.keys()):
if augmentation == "Crop":
height, width, _ = image.shape
crop_percentage = random.uniform(0.6, 0.9)
transform = self.AugmentationOptions[augmentation]["operation"][0](height=int(height*crop_percentage), width=int(width*crop_percentage) )
elif augmentation == "Blur":
transform = self.AugmentationOptions[augmentation]["operation"](blur_limit = limit)
elif augmentation in ["Contrast","Brightness"]:
transform = self.AugmentationOptions[augmentation]["operation"](limit = limit)
else:
transform = self.AugmentationOptions[augmentation]["operation"]()
return transform(image=image)
def __applyAugmentation(self, image, augmentation,limit,bboxes=None, category_ids=None, seed=7):
transformOptions = []
if bboxes:
bbox_params = A.BboxParams(format='pascal_voc', label_fields=['category_ids'])
else:
bbox_params = None
if augmentation in list(self.AugmentationOptions.keys()):
if augmentation == "Crop":
height, width, _ = image.shape
crop_percentage = random.uniform(0.6, 0.9)
transformOptions.append(self.AugmentationOptions[augmentation]["operation"][1](height=int(height*crop_percentage), width=int(width*crop_percentage) ))
elif augmentation == "Blur":
transformOptions.append(self.AugmentationOptions[augmentation]["operation"](blur_limit = limit))
elif augmentation in ["Contrast","Brightness"]:
transformOptions.append(self.AugmentationOptions[augmentation]["operation"](limit = limit))
else:
transformOptions.append(self.AugmentationOptions[augmentation]["operation"]())
transform = A.Compose(
transformOptions,
bbox_params=bbox_params,
)
random.seed(seed)
return transform(image=image, bboxes=bboxes, category_ids=category_ids)
else:
return None
def getBBox(self, df, imageLoc, category_name_to_id):
subDf = df[df['loc']==imageLoc]
boxes = []
category = []
for index, row in subDf.iterrows():
boxes.append( [row['xmin'],row['ymin'],row['xmax'],row['ymax']])
category.append(category_name_to_id[row['Label']])
return boxes, category
def __objAug(self, imageLoc, df, classes_names, category_id_to_name, category_name_to_id,limit,numberofImages,op):
for x in range(numberofImages):
bbox, category_ids = self.getBBox(df, imageLoc, category_name_to_id)
image = cv2.imread(imageLoc)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
transformed = self.__applyAugmentation(image, op,limit,bbox, category_ids)
transformed['image'] = cv2.cvtColor(transformed['image'], cv2.COLOR_RGB2BGR)
count = 1
row = df[df['loc']==imageLoc].iloc[0]
filename = (Path(imageLoc).stem +'_'+str(x)+ self.AugmentationOptions[op]["suffix"] + Path(imageLoc).suffix)
newImage = str(Path(imageLoc).parent/filename)
for index,bbox in enumerate(transformed['bboxes']):
data = {'File':filename, 'xmin':bbox[0],'ymin':bbox[1],'xmax':bbox[2],'ymax':bbox[3],'Label':category_id_to_name[transformed['category_ids'][index]],'id':count,'height':row['height'],'width':row['width'], 'angle':0.0, 'loc': newImage, 'AugmentedImage': True}
count += 1
df=df.append(data, ignore_index=True)
cv2.imwrite(newImage, transformed['image'])
return df
def __objectDetection(self, images, df, optionDf, classes_names, suffix='',augConf={}):
category_id_to_name = {v+1:k for v,k in enumerate(classes_names)}
category_name_to_id = {k:v+1 for v,k in enumerate(classes_names)}
for i, imageLoc in enumerate(images):
for key in optionDf.columns:
if optionDf.iloc[i][key]:
if key in augConf:
limit = eval(augConf[key].get('limit','0.2'))
numberofImages = int(augConf[key].get('noOfImages',1))
else:
limit = 0.2
numberofImages = 1
df = self.__objAug(imageLoc, df, classes_names, category_id_to_name,category_name_to_id,limit,numberofImages,op=key)
return df
def __augClassificationImage(self, imageLoc, df,limit,imageindex,op):
data = {}
image = cv2.imread(imageLoc)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
transformed = self.__applyAugmentationClass(image, op,limit)
transformed['image'] = cv2.cvtColor(transformed['image'], cv2.COLOR_RGB2BGR)
location = Path(imageLoc).parent
        filename = (Path(imageLoc).stem +'_'+ str(imageindex) + self.AugmentationOptions[op]["suffix"] + Path(imageLoc).suffix)
cv2.imwrite(str(location/'AION'/'AugumentedImages'/filename), transformed['image'])
data['File'] = filename
data['Label'] = df[df['File']==Path(imageLoc).name]["Label"].iloc[0]
data['AugmentedImage'] = True
data['loc'] = str(location/filename)
return data
def __classification(self, images, df, optionDf,augConf,csv_file=None, outputDir=None):
for i, imageLoc in enumerate(images):
for key in optionDf.columns:
if optionDf.iloc[i][key]:
if key in augConf:
limit = eval(augConf[key].get('limit','0.2'))
numberofImages = int(augConf[key].get('noOfImages',1))
else:
limit = 0.2
numberofImages = 1
for x in range(numberofImages):
rows = self.__augClassificationImage(imageLoc, df,limit,x,op=key)
df=df.append(rows, ignore_index=True)
return df
    def removeAugmentedImages(self, df):
removeDf = df[df['AugmentedImage'] == True]['loc'].unique().tolist()
#df[df['imageAugmentationOriginalImage'] != True][loocationField].apply(lambda x: Path(x).unlink())
for file in removeDf:
if file:
Path(file).unlink()
def augment(self, modelType="imageclassification",params=None,csvSavePath = None,augConf={}):
if isinstance(params, dict) and any(params.values()):
df = pd.read_csv(self.csvFile)
if not self.dataLocation.endswith('/'):
images = self.dataLocation+'/'
else:
images = self.dataLocation
if modelType == "imageclassification":
images = images + df['File']
else:
images = images + df['File']
df['loc'] = images
images = set(images.tolist())
option = {}
for key in list(self.AugmentationOptions.keys()):
option[key] = params.get(key, False)
optionDf = pd.DataFrame(columns=list(option.keys()))
for i in range(len(images)):
optionDf = optionDf.append(option, ignore_index=True)
if modelType == "imageclassification":
df = self.__classification(images, df, optionDf,augConf)
else:
classes_names = sorted(df['Label'].unique().tolist())
df = self.__objectDetection(images, df, optionDf, classes_names,'',augConf)
df.to_csv(self.csvFile, index=False)
        return self.csvFile
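
# --- Hedged usage sketch (not part of the original module) ---
# Shows how augment() might be invoked for an image-classification dataset,
# assuming './images' holds the image files, './images/labels.csv' has 'File'
# and 'Label' columns, and the 'AION/AugumentedImages' output folder exists;
# the paths and enabled operations are illustrative assumptions.
if __name__ == '__main__':
    ia = ImageAugmentation('./images', './images/labels.csv')
    updated_csv = ia.augment(modelType='imageclassification',
                             params={'Flip': True, 'Rotate': True},
                             augConf={'Flip': {'limit': '0.2', 'noOfImages': 1}})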
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
#System imports
import logging
from distutils.util import strtobool
import pandas as pd
from text import TextProcessing
def get_one_true_option(d, default_value):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
class textProfiler():
def __init__(self):
self.log = logging.getLogger('eion')
def textCleaning(self, textCorpus):
textProcessor = TextProcessing.TextProcessing()
textCorpus = textProcessor.transform(textCorpus)
return(textCorpus)
def textProfiler(self, textCorpus, conf_json, pipeList, max_features):
cleaning_kwargs = {}
textCleaning = conf_json.get('textCleaning')
self.log.info("Text Preprocessing config: ",textCleaning)
cleaning_kwargs['fRemoveNoise'] = strtobool(textCleaning.get('removeNoise', 'True'))
cleaning_kwargs['fNormalize'] = strtobool(textCleaning.get('normalize', 'True'))
cleaning_kwargs['fReplaceAcronym'] = strtobool(textCleaning.get('replaceAcronym', 'False'))
cleaning_kwargs['fCorrectSpelling'] = strtobool(textCleaning.get('correctSpelling', 'False'))
cleaning_kwargs['fRemoveStopwords'] = strtobool(textCleaning.get('removeStopwords', 'True'))
cleaning_kwargs['fRemovePunctuation'] = strtobool(textCleaning.get('removePunctuation', 'True'))
cleaning_kwargs['fRemoveNumericTokens'] = strtobool(textCleaning.get('removeNumericTokens', 'True'))
cleaning_kwargs['normalizationMethod'] = get_one_true_option(textCleaning.get('normalizeMethod'),
'lemmatization').capitalize()
removeNoiseConfig = textCleaning.get('removeNoiseConfig')
if type(removeNoiseConfig) is dict:
cleaning_kwargs['removeNoise_fHtmlDecode'] = strtobool(removeNoiseConfig.get('decodeHTML', 'True'))
cleaning_kwargs['removeNoise_fRemoveHyperLinks'] = strtobool(removeNoiseConfig.get('removeHyperLinks', 'True'))
cleaning_kwargs['removeNoise_fRemoveMentions'] = strtobool(removeNoiseConfig.get('removeMentions', 'True'))
cleaning_kwargs['removeNoise_fRemoveHashtags'] = strtobool(removeNoiseConfig.get('removeHashtags', 'True'))
cleaning_kwargs['removeNoise_RemoveOrReplaceEmoji'] = 'remove' if strtobool(removeNoiseConfig.get('removeEmoji', 'True')) else 'replace'
cleaning_kwargs['removeNoise_fUnicodeToAscii'] = strtobool(removeNoiseConfig.get('unicodeToAscii', 'True'))
cleaning_kwargs['removeNoise_fRemoveNonAscii'] = strtobool(removeNoiseConfig.get('removeNonAscii', 'True'))
acronymConfig = textCleaning.get('acronymConfig')
if type(acronymConfig) is dict:
cleaning_kwargs['acronymDict'] = acronymConfig.get('acronymDict', None)
stopWordsConfig = textCleaning.get('stopWordsConfig')
if type(stopWordsConfig) is dict:
cleaning_kwargs['stopwordsList'] = stopWordsConfig.get('stopwordsList', [])
cleaning_kwargs['extend_or_replace_stopwordslist'] = 'extend' if strtobool(stopWordsConfig.get('extend', 'True')) else 'replace'
removeNumericConfig = textCleaning.get('removeNumericConfig')
if type(removeNumericConfig) is dict:
cleaning_kwargs['removeNumeric_fIncludeSpecialCharacters'] = strtobool(removeNumericConfig.get('removeNumeric_IncludeSpecialCharacters', 'True'))
removePunctuationConfig = textCleaning.get('removePunctuationConfig')
if type(removePunctuationConfig) is dict:
cleaning_kwargs['fRemovePuncWithinTokens'] = strtobool(removePunctuationConfig.get('removePuncWithinTokens', 'False'))
cleaning_kwargs['fExpandContractions'] = strtobool(textCleaning.get('expandContractions', 'False'))
if cleaning_kwargs['fExpandContractions']:
cleaning_kwargs['expandContractions_googleNewsWordVectorPath'] = GOOGLE_NEWS_WORD_VECTORS_PATH
libConfig = textCleaning.get('libConfig')
if type(libConfig) is dict:
cleaning_kwargs['tokenizationLib'] = get_one_true_option(libConfig.get('tokenizationLib'), 'nltk')
cleaning_kwargs['lemmatizationLib'] = get_one_true_option(libConfig.get('lemmatizationLib'), 'nltk')
cleaning_kwargs['stopwordsRemovalLib'] = get_one_true_option(libConfig.get('stopwordsRemovalLib'), 'nltk')
textProcessor = TextProcessing.TextProcessing(**cleaning_kwargs)
textCorpus = textProcessor.transform(textCorpus)
pipeList.append(("TextProcessing",textProcessor))
textFeatureExtraction = conf_json.get('textFeatureExtraction')
if strtobool(textFeatureExtraction.get('pos_tags', 'False')):
pos_tags_lib = get_one_true_option(textFeatureExtraction.get('pos_tags_lib'), 'nltk')
posTagger = TextProcessing.PosTagging( pos_tags_lib)
textCorpus = posTagger.transform(textCorpus)
pipeList.append(("posTagger",posTagger))
ngram_min = 1
ngram_max = 1
if strtobool(textFeatureExtraction.get('n_grams', 'False')):
n_grams_config = textFeatureExtraction.get("n_grams_config")
ngram_min = int(n_grams_config.get('min_n', 1))
ngram_max = int(n_grams_config.get('max_n', 1))
if (ngram_min < 1) or ngram_min > ngram_max:
ngram_min = 1
ngram_max = 1
invalidNgramWarning = 'WARNING : invalid ngram config.\\nUsing the default values min_n={}, max_n={}'.format(ngram_min, ngram_max)
self.log.info(invalidNgramWarning)
ngram_range_tuple = (ngram_min, ngram_max)
textConversionMethod = conf_json.get('textConversionMethod')
conversion_method = get_one_true_option(textConversionMethod, None)
if conversion_method.lower() == "countvectors":
X, vectorizer = TextProcessing.ExtractFeatureCountVectors(textCorpus, ngram_range=ngram_range_tuple, max_features=max_features)
pipeList.append(("vectorizer",vectorizer))
df1 = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names())
df1 = df1.add_suffix('_vect')
self.log.info('----------> Conversion Method: CountVectors')
elif conversion_method.lower() in ["word2vec","fasttext","glove"]:
embedding_method = conversion_method
wordEmbeddingVecotrizer = TextProcessing.wordEmbedding(embedding_method)
wordEmbeddingVecotrizer.checkAndDownloadPretrainedModel()
X = wordEmbeddingVecotrizer.transform(textCorpus)
df1 = pd.DataFrame(X)
df1 = df1.add_suffix('_vect')
pipeList.append(("vectorizer",wordEmbeddingVecotrizer))
self.log.info('----------> Conversion Method: '+str(conversion_method))
elif conversion_method.lower() == "sentencetransformer":
from sentence_transformers import SentenceTransformer
model = SentenceTransformer('sentence-transformers/msmarco-distilroberta-base-v2')
X = model.encode(textCorpus)
df1 = pd.DataFrame(X)
df1 = df1.add_suffix('_vect')
pipeList.append(("vectorizer",model))
self.log.info('----------> Conversion Method: SentenceTransformer')
elif conversion_method.lower() == 'tf_idf':
X, vectorizer = TextProcessing.ExtractFeatureTfIdfVectors(textCorpus,ngram_range=ngram_range_tuple, max_features=max_features)
pipeList.append(("vectorizer",vectorizer))
df1 = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names())
df1 = df1.add_suffix('_vect')
self.log.info('----------> Conversion Method: TF_IDF')
else:
df1 = pd.DataFrame()
df1['tokenize'] = textCorpus
self.log.info('----------> Conversion Method: NA')
return df1, pipeList,conversion_method
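
# --- Hedged usage sketch (not part of the original module) ---
# A minimal call into the textCleaning helper above, assuming the bundled
# text.TextProcessing module is importable; the sample sentences are made up.
if __name__ == '__main__':
    corpus = pd.Series(['AION cleans <b>HTML</b> noise!', 'Visit https://example.com for more details'])
    cleaned = textProfiler().textCleaning(corpus)
    print(cleaned)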
import os
import sys
import numpy as np
import scipy
import pandas as pd
from pathlib import Path
default_config = {
'misValueRatio': '1.0',
'numericFeatureRatio': '1.0',
'categoryMaxLabel': '20',
'str_to_cat_len_max': 10
}
target_encoding_method_change = {'targetencoding': 'labelencoding'}
supported_method = {
'fillNa':
{
'categorical' : ['mode','zero','na'],
'numeric' : ['median','mean','knnimputer','zero','drop','na'],
},
'categoryEncoding': ['labelencoding','targetencoding','onehotencoding','na','none'],
'normalization': ['standardscaler','minmax','lognormal', 'na','none'],
'outlier_column_wise': ['iqr','zscore', 'disable', 'na'],
'outlierOperation': ['dropdata', 'average', 'nochange']
}
def findiqrOutlier(df):
Q1 = df.quantile(0.25)
Q3 = df.quantile(0.75)
IQR = Q3 - Q1
index = ~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR)))
return index
def findzscoreOutlier(df):
z = np.abs(scipy.stats.zscore(df))
index = (z < 3)
return index
def findiforestOutlier(df):
from sklearn.ensemble import IsolationForest
isolation_forest = IsolationForest(n_estimators=100)
isolation_forest.fit(df)
y_pred_train = isolation_forest.predict(df)
return y_pred_train == 1
def get_one_true_option(d, default_value=None):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
def get_boolean(value):
if (isinstance(value, str) and value.lower() == 'true') or (isinstance(value, bool) and value == True):
return True
else:
return False
def recommenderStartProfiler(self,modelFeatures):
try:
self.log.info('----------> FillNA:0')
self.data = self.data.fillna(value=0)
self.log.info('Status:- !... Missing value treatment done')
self.log.info('----------> Remove Empty Row')
self.data = self.data.dropna(axis=0,how='all')
self.log.info('Status:- !... Empty feature treatment done')
userId,itemId,rating = modelFeatures.split(',')
self.data[itemId] = self.data[itemId].astype(np.int32)
self.data[userId] = self.data[userId].astype(np.int32)
self.data[rating] = self.data[rating].astype(np.float32)
return self.data
except Exception as inst:
self.log.info("Error: dataProfiler failed "+str(inst))
return(self.data)
def folderPreprocessing(self,folderlocation,folderdetails,deployLocation):
try:
dataset_directory = Path(folderlocation)
dataset_csv_file = dataset_directory/folderdetails['label_csv_file_name']
tfrecord_directory = Path(deployLocation)/'Video_TFRecord'
from savp import PreprocessSAVP
import csv
csvfile = open(dataset_csv_file, newline='')
csv_reader = csv.DictReader(csvfile)
PreprocessSAVP(dataset_directory,csv_reader,tfrecord_directory)
dataColumns = list(self.data.columns)
VideoProcessing = True
return dataColumns,VideoProcessing,tfrecord_directory
except Exception as inst:
self.log.info("Error: dataProfiler failed "+str(inst))
def textSimilarityStartProfiler(self, doc_col_1, doc_col_2):
import os
try:
features = [doc_col_1, doc_col_2]
pipe = None
dataColumns = list(self.data.columns)
self.numofCols = self.data.shape[1]
self.numOfRows = self.data.shape[0]
from transformations.textProfiler import textProfiler
self.log.info('-------> Execute Fill NA With Empty String')
self.data = self.data.fillna(value=" ")
self.log.info('Status:- |... Missing value treatment done')
self.data[doc_col_1] = textProfiler().textCleaning(self.data[doc_col_1])
self.data[doc_col_2] = textProfiler().textCleaning(self.data[doc_col_2])
self.log.info('-------> Concatenate: ' + doc_col_1 + ' ' + doc_col_2)
self.data['text'] = self.data[[doc_col_1, doc_col_2]].apply(lambda row: ' '.join(row.values.astype(str)), axis=1)
from tensorflow.keras.preprocessing.text import Tokenizer
pipe = Tokenizer()
pipe.fit_on_texts(self.data['text'].values)
self.log.info('-------> Tokenizer: Fit on Concatenate Field')
self.log.info('Status:- |... Tokenizer the text')
self.data[doc_col_1] = self.data[doc_col_1].astype(str)
            self.data[doc_col_2] = self.data[doc_col_2].astype(str)
return (self.data, pipe, self.target_name, features)
except Exception as inst:
self.log.info("StartProfiler failed " + str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
def set_features(features,profiler=None):
if profiler:
features = [x for x in features if x not in profiler.added_features]
return features + profiler.text_feature
    return features
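
# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates the stand-alone helpers on a toy series with one obvious
# outlier; the numbers are illustrative only.
if __name__ == '__main__':
    import scipy.stats  # ensures scipy.stats is loaded for findzscoreOutlier
    s = pd.Series([10, 11, 9, 12, 10, 500])
    print('IQR inliers    :', findiqrOutlier(s).tolist())
    print('Z-score inliers:', findzscoreOutlier(s).tolist())
    print('Chosen option  :', get_one_true_option({'minmax': 'True', 'standardscaler': 'False'}, 'none'))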
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import sys
from pathlib import Path
import urllib.request
import tarfile
import json
import subprocess
import os
from os.path import expanduser
import platform
class ODpretrainedModels():
def __init__(self, location=None):
if location:
if isinstance(location, Path):
self.pretrained_models_location = location.as_posix()
else:
self.pretrained_models_location = location
else:
p = subprocess.run([sys.executable, "-m", "pip","show","AION"],capture_output=True, text=True)
if p.returncode == 0:
Output = p.stdout.split('\\n')
for x in Output:
y = x.split(':',1)
if(y[0]=='Location'):
self.pretrained_models_location = y[1].strip()+"/AION/pretrained_models/object_detection"
break
if Path(self.pretrained_models_location).is_dir():
self.config_file_location = self.pretrained_models_location+'/supported_models.json'
with open(self.config_file_location) as json_data:
self.supportedModels = json.load(json_data)
home = expanduser("~")
if platform.system() == 'Windows':
self.modelsPath = os.path.join(home,'AppData','Local','HCLT','AION','PreTrainedModels','ObjectDetection')
else:
self.modelsPath = os.path.join(home,'HCLT','AION','PreTrainedModels','ObjectDetection')
if os.path.isdir(self.modelsPath) == False:
os.makedirs(self.modelsPath)
def __save_config(self):
with open(self.config_file_location, 'w') as json_file:
json.dump(self.supportedModels, json_file)
def __download(self, modelName):
try:
url = self.supportedModels[modelName]["url"]
file = self.supportedModels[modelName]["file"]
local_file_path = Path(self.modelsPath)/(file+".tar.gz")
urllib.request.urlretrieve(url, local_file_path)
except:
raise ValueError("{} model download error, check your internet connection".format(modelName))
return local_file_path
def __extract(self, modelName, file_location, extract_dir):
try:
tarFile = tarfile.open(file_location)
tarFile.extractall(extract_dir)
tarFile.close()
Path.unlink(file_location)
return True
except:
return False
def download(self, modelName):
if modelName in list(self.supportedModels.keys()):
p = Path(self.modelsPath).glob('**/*')
modelsDownloaded = [x.name for x in p if x.is_dir()]
if self.supportedModels[modelName]['file'] not in modelsDownloaded:
file = self.__download(modelName)
self.supportedModels[modelName]["downloaded"] = True
if self.__extract(modelName, file, self.modelsPath):
self.supportedModels[modelName]["extracted"] = True
self.__save_config()
else:
self.__save_config()
raise ValueError("{} model downloaded but extraction failed,please try again".format(modelName))
else:
raise ValueError("{} is not supported for object detection".format(modelName))
return self.supportedModels[modelName]
def get_info(self,modeltype):
models_info = {}
p = Path(self.pretrained_models_location)
downloaded_models = [x.name for x in p.iterdir() if x.is_dir()]
for model in list(self.supportedModels.keys()):
if (self.supportedModels[model]['type'] == modeltype) or (modeltype == ''):
models_info[model] = self.supportedModels[model]['extracted']
return models_info
def is_model_exist(self, model_name):
models = self.get_info('')
status = "NOT_SUPPORTED"
if model_name in models:
if self.supportedModels[model_name]['extracted']:
status = "READY"
else:
status = "NOT_READY"
return status
def clear_config(self, model_name):
self.supportedModels[model_name]['extracted'] = False
self.supportedModels[model_name]['downloaded'] = False
self.__save_config()
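
# --- Hedged usage sketch (not part of the original module) ---
# Illustrates querying and fetching a pretrained object-detection model,
# assuming the AION package is installed (for the default model location) and
# that 'ssd_mobilenet_v2' is a hypothetical entry in supported_models.json.
if __name__ == '__main__':
    od = ODpretrainedModels()
    print(od.get_info(''))                     # every known model and its extraction state
    if od.is_model_exist('ssd_mobilenet_v2') == 'NOT_READY':
        od.download('ssd_mobilenet_v2')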
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import numpy as np
import os
import sys
import string
import spacy
#import en_core_web_sm
from spacy.lang.en.stop_words import STOP_WORDS
from spacy.lang.en import English
try:
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
except:
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
from sklearn.base import TransformerMixin
from nltk.stem import WordNetLemmatizer
import re
from collections import defaultdict
from nltk.corpus import wordnet as wn
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import LabelBinarizer
from nltk.tokenize import word_tokenize
from nltk import pos_tag
from nltk.corpus import stopwords
class textDataProfiler():
def __init__(self):
self.data=None
#self.nlp=en_core_web_sm.load()
self.punctuations = string.punctuation
self.stopwords = list(STOP_WORDS)
def startTextProfiler(self,df,target):
try:
dataColumns = list(df.columns)
print(' \\n No of rows and columns in dataFrame',df.shape)
print('\\n features in dataFrame',dataColumns)
dataFDtypes=self.dataFramecolType(df)
print('\\n feature types in dataFrame',dataFDtypes)
trainX=df['text']
trainY=df[target]
return trainX,trainY
except Exception as inst:
print('startTextProfiler code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
def dataFramecolType(self,dataFrame):
dataFDtypes=[]
try:
dataColumns=list(dataFrame.columns)
for i in dataColumns:
dataType=dataFrame[i].dtypes
dataFDtypes.append(tuple([i,str(dataType)]))
return dataFDtypes
except Exception as e:
print("error in dataFramecolyType",e)
return dataFDtypes
def textTokenizer(self,text):
try:
parser = English()
tokens = parser(text)
tokens = [ word.lemma_.lower().strip() if word.lemma_ != "-PRON-" else word.lower_ for word in tokens ]
tokens = [ word for word in tokens if word not in self.stopwords and word not in self.punctuations ]
return tokens
except Exception as inst:
print('textDataProfiler code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
return {}
def cleanText(self,text):
try:
text=str(text).strip().lower()
for punctuation in string.punctuation:
text = text.replace(punctuation, '')
return text
except Exception as inst:
print('cleanText code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
def textTokenization(self,text):
try:
tokenizedText=word_tokenize(text)
return tokenizedText
except Exception as inst:
print('textDataProfiler code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
return {}
def textLemmitizer(self,text):
try:
tag_map = defaultdict(lambda : wn.NOUN)
tag_map['J'] = wn.ADJ
tag_map['V'] = wn.VERB
tag_map['R'] = wn.ADV
Final_words = []
word_Lemmatized = WordNetLemmatizer()
for word, tag in pos_tag(text):
if word not in stopwords.words('english') and word.isalpha():
word_Final = word_Lemmatized.lemmatize(word,tag_map[tag[0]])
Final_words.append(word_Final)
return str(Final_words)
except Exception as inst:
print('textLemmitizer code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
return {}
class TextCleaner(TransformerMixin):
def clean_text(self,text):
try:
text=str(text).strip().lower()
text = text.replace("isn't", "is not")
text = text.replace("aren't", "are not")
text = text.replace("ain't", "am not")
text = text.replace("won't", "will not")
text = text.replace("didn't", "did not")
text = text.replace("shan't", "shall not")
text = text.replace("haven't", "have not")
text = text.replace("hadn't", "had not")
text = text.replace("hasn't", "has not")
text = text.replace("don't |
", "do not")
text = text.replace("wasn't", "was not")
text = text.replace("weren't", "were not")
text = text.replace("doesn't", "does not")
text = text.replace("'s", " is")
text = text.replace("'re", " are")
text = text.replace("'m", " am")
text = text.replace("'d", " would")
text = text.replace("'ll", " will")
text = re.sub(r'^https?:\\/\\/.*[\\r\\n]*', ' ', text, flags=re.MULTILINE)
text = re.sub(r'[\\w\\.-]+@[\\w\\.-]+', ' ', text, flags=re.MULTILINE)
for punctuation in string.punctuation:
text = text.replace(punctuation,' ')
text = re.sub(r'[^A-Za-z0-9\\s]',r' ',text)
text = re.sub(r'\\n',r' ',text)
text = re.sub(r'[0-9]',r' ',text)
wordnet_lemmatizer = WordNetLemmatizer()
text = " ".join([wordnet_lemmatizer.lemmatize(w, pos='v') for w in text.split()])
return text
except Exception as inst:
print('TextCleaner clean_text code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
def text_cleaner(self,text):
text = self.clean_text(text)
stop_words = set(stopwords.words('english'))
text_tokens = word_tokenize(text)
out=' '.join(str(j) for j in text_tokens if j not in stop_words and (len(j)!=1))
return(out)
def transform(self, X, **transform_params):
# Cleaning Text
return [self.clean_text(text) for text in X]
def fit(self, X, y=None, **fit_params):
return self
def get_params(self, deep=True):
        return {}
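# --- Illustrative usage sketch (not part of the original module) ---
# Because TextCleaner implements fit/transform, it can sit in front of a
# vectorizer inside an sklearn Pipeline. The sample documents below are
# made-up placeholders, and the NLTK corpora used above ('punkt', 'stopwords',
# 'wordnet') are assumed to be downloaded already.
if __name__ == '__main__':
    from sklearn.pipeline import Pipeline
    sample_docs = ["I haven't seen a cleaner pipeline!",
                   "Visit https://example.com for more details."]
    text_pipeline = Pipeline([('clean', TextCleaner()), ('vectorize', CountVectorizer())])
    features = text_pipeline.fit_transform(sample_docs)
    print('vectorized shape:', features.shape)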
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import glob
import pandas as pd
import io
import xml.etree.ElementTree as ET
import argparse
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging (1)
import tensorflow as tf
from PIL import Image
from object_detection.utils import dataset_util, label_map_util
from collections import namedtuple
from pathlib import Path
def class_text_to_int(row_label, label_map_dict):
return label_map_dict[row_label]
def split(df, group):
data = namedtuple('data', ['File', 'object'])
gb = df.groupby(group)
return [data(File, gb.get_group(x)) for File, x in zip(gb.groups.keys(), gb.groups)]
def create_tf_example(group, path, label_map_dict):
with tf.io.gfile.GFile(os.path.join(path, '{}'.format(group.File)), 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = Image.open(encoded_jpg_io)
width, height = image.size
File = group.File.encode('utf8')
image_format = b'jpg'
xmins = []
xmaxs = []
ymins = []
ymaxs = []
classes_text = []
classes = []
for index, row in group.object.iterrows():
xmin_n = min(row['xmin'], row['xmax'])
xmax_n = max(row['xmin'], row['xmax'])
ymin_n = min(row['ymin'], row['ymax'])
ymax_n = max(row['ymin'], row['ymax'])
xmin_new = min(xmin_n, width)
xmax_new = min(xmax_n, width)
ymin_new = min(ymin_n, height)
ymax_new = min(ymax_n, height)
xmn = xmin_new / width
xmins.append(xmn)
xmx = xmax_new / width
xmaxs.append(xmx)
ymn = ymin_new / height
ymins.append(ymn)
ymx = ymax_new / height
ymaxs.append(ymx)
classes_text.append(row['Label'].encode('utf8'))
classes.append(class_text_to_int(row['Label'], label_map_dict))
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(File),
'image/source_id': dataset_util.bytes_feature(File),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature(image_format),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
}))
return tf_example
def labelFile(classes_names, label_map_path):
pbtxt_content = ""
for i, class_name in enumerate(classes_names):
pbtxt_content = (
pbtxt_content
+ "item {{\\n id: {0}\\n name: '{1}'\\n}}\\n\\n".format(i + 1, class_name)
)
pbtxt_content = pbtxt_content.strip()
with open(label_map_path, "w") as f:
f.write(pbtxt_content)
def createLabelFile(train_df, save_path):
labelmap_path = str(Path(save_path)/ 'label_map.pbtxt')
classes_names = sorted(train_df['Label'].unique().tolist())
labelFile(classes_names, labelmap_path)
return labelmap_path, len(classes_names)
def generate_TF_record(image_dir, output_dir, train_df, test_df, labelmap_path):
outputPath = str(Path(output_dir)/ 'train.tfrecord')
writer = tf.io.TFRecordWriter( outputPath)
grouped = split(train_df, 'File')
label_map = label_map_util.load_labelmap(labelmap_path )
label_map_dict = label_map_util.get_label_map_dict(label_map)
for group in grouped:
tf_example = create_tf_example(group, image_dir, label_map_dict)
writer.write(tf_example.SerializeToString())
writer.close()
if len(test_df):
outputPath = str(Path(output_dir)/ 'test.tfrecord')
writer = tf.io.TFRecordWriter( outputPath)
grouped = split(test_df, 'File')
for group in grouped:
tf_example = create_tf_example(group, image_dir, label_map_dict)
writer.write(tf_example.SerializeToString())
writer.close()
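# --- Illustrative usage sketch (not part of the original module) ---
# The annotation DataFrames are expected to carry the columns referenced in
# create_tf_example: 'File', 'Label', 'xmin', 'xmax', 'ymin', 'ymax'. The
# paths below are placeholders and the output directory is assumed to exist.
if __name__ == '__main__':
    annotations = pd.read_csv('annotations.csv')               # placeholder annotation file
    train_df = annotations.sample(frac=0.8, random_state=42)   # simple 80/20 split for illustration
    test_df = annotations.drop(train_df.index)
    labelmap_path, num_classes = createLabelFile(train_df, 'output')
    generate_TF_record('images', 'output', train_df, test_df, labelmap_path)
    print('classes:', num_classes)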
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> from kafka import KafkaConsumer
from json import loads
import pandas as pd
import json
import os,sys
import time
import multiprocessing
from os.path import expanduser
import platform
import datetime
modelDetails = {}
class Process(multiprocessing.Process):
def __init__(self, modelSignature,jsonData,predictedData,modelpath):
super(Process, self).__init__()
self.config = jsonData
self.modelSignature = modelSignature
self.data = predictedData
self.modelpath = modelpath
def run(self):
#data = pd.json_normalize(self.data)
minotoringService = self.config['minotoringService']['url']
trainingdatalocation = self.config['trainingDataLocation'][self.modelSignature]
#filetimestamp = 'AION_'+str(int(time.time()))+'.csv'
#data.to_csv(dataFile, index=False)
inputFieldsJson = {"trainingDataLocation":trainingdatalocation,"currentDataLocation":self.data}
inputFieldsJson = json.dumps(inputFieldsJson)
ser_url = minotoringService+self.modelSignature+'/monitoring'
driftTime = datetime.datetime.now()
import requests
try:
response = requests.post(ser_url, data=inputFieldsJson,headers={"Content-Type":"application/json",})
outputStr=response.content
outputStr = outputStr.decode('utf-8')
outputStr = outputStr.strip()
decoded_data = json.loads(outputStr)
print(decoded_data)
status = decoded_data['status']
msg = decoded_data['data']
except Exception as inst:
if 'Failed to establish a new connection' in str(inst):
status = 'Fail'
msg = 'AION Service needs to be started'
else:
status = 'Fail'
msg = 'Error during Drift Analysis'
statusFile = os.path.join(self.modelpath,self.modelSignature+'_status.csv')
df = pd.DataFrame(columns = ['dateTime', 'status', 'msg'])
        df = pd.concat([df, pd.DataFrame([{'dateTime': driftTime, 'status': status, 'msg': msg}])], ignore_index=True)
print(df)
if (os.path.exists(statusFile)):
df.to_csv(statusFile, mode='a', header=False,index=False)
else:
df.to_csv(statusFile, header=True,index=False)
def launch_kafka_consumer():
from appbe.dataPath import DATA_DIR
configfile = os.path.join(os.path.dirname(__file__),'..','config','kafkaConfig.conf')
with open(configfile,'r',encoding='utf-8') as f:
jsonData = json.load(f)
f.close()
kafkaIP=jsonData['kafkaCluster']['ip']
kafkaport = jsonData['kafkaCluster']['port']
topic = jsonData['kafkaCluster']['topic']
kafkaurl = kafkaIP+':'+kafkaport
if jsonData['database']['csv'] == 'True':
database = 'csv'
elif jsonData['database']['mySql'] == 'True':
database = 'mySql'
else:
database = 'csv'
kafkaPath = os.path.join(DATA_DIR,'kafka')
if not (os.path.exists(kafkaPath)):
try:
os.makedirs(kafkaPath)
except OSError as e:
pass
consumer = KafkaConsumer(topic,bootstrap_servers=[kafkaurl],auto_offset_reset='earliest',enable_auto_commit=True,group_id='my-group',value_deserializer=lambda x: loads(x.decode('utf-8')))
for message in consumer:
message = message.value
data = message['data']
data = pd.json_normalize(data)
modelname = message['usecasename']
version = message['version']
modelSignature = modelname+'_'+str(version)
modelpath = os.path.join(kafkaPath,modelSignature)
try:
os.makedirs(modelpath)
except OSError as e:
pass
secondsSinceEpoch = time.time()
if modelSignature not in modelDetails:
modelDetails[modelSignature] = {}
modelDetails[modelSignature]['startTime'] = secondsSinceEpoch
if database == 'csv':
csvfile = os.path.join(modelpath,modelSignature+'.csv')
if (os.path.exists(csvfile)):
data.to_csv(csvfile, mode='a', header=False,index=False)
else:
data.to_csv(csvfile, header=True,index=False)
modelTimeFrame = jsonData['timeFrame'][modelSignature]
currentseconds = time.time()
print(currentseconds - modelDetails[modelSignature]['startTime'])
if (currentseconds - modelDetails[modelSignature]['startTime']) >= float(modelTimeFrame):
csv_path = os.path.join(modelpath,modelSignature+'.csv')
#predictedData = pd.read_csv(csv_path)
##predictedData = predictedData.to_json(orient="records")
index = Process(modelSignature,jsonData,csv_path,modelpath)
index.start()
modelDetails[modelSignature]['startTime'] = secondsSinceEpoch
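# --- Sample kafkaConfig.conf (inferred from the keys read above; all values are placeholders) ---
# {
#     "kafkaCluster": {"ip": "127.0.0.1", "port": "9092", "topic": "aion_predictions"},
#     "database": {"csv": "True", "mySql": "False"},
#     "timeFrame": {"usecase1_1": "3600"},
#     "minotoringService": {"url": "http://127.0.0.1:8094/AION/"},
#     "trainingDataLocation": {"usecase1_1": "C:/data/training.csv"}
# }
# The 'minotoringService' key is spelled exactly as the Process class reads it.
# Each Kafka message is expected to be a JSON object carrying 'data' (a list of
# records), 'usecasename' and 'version', as consumed in the loop above.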
<s> import os
import shutil
import sys
import subprocess
from os.path import expanduser
import platform
import json
def createDockerImage(model_name,model_version,module,folderpath):
command = 'docker pull python:3.8-slim-buster'
os.system(command);
subprocess.check_call(["docker", "build", "-t",module+'_'+model_name.lower()+":"+model_version,"."], cwd=folderpath)
def local_docker_build(config):
print(config)
config = json.loads(config)
model_name = config['usecase']
model_version = config['version']
mlaac__code_path = config['mlacPath']
docker_images = {}
docker_images['ModelMonitoring'] = 'modelmonitoring'+'_'+model_name.lower()+':'+model_version
dataset_addr = os.path.join(mlaac__code_path,'ModelMonitoring')
    createDockerImage(model_name,model_version,'modelmonitoring',dataset_addr)
docker_images['DataIngestion'] = 'dataingestion'+'_'+model_name.lower()+':'+model_version
dataset_addr = os.path.join(mlaac__code_path,'DataIngestion')
createDockerImage(model_name,model_version,'dataingestion',dataset_addr)
transformer_addr = os.path.join(mlaac__code_path,'DataTransformation')
docker_images['DataTransformation'] = 'datatransformation'+'_'+model_name.lower()+':'+model_version
createDockerImage(model_name,model_version,'datatransformation',transformer_addr)
featureengineering_addr = os.path.join(mlaac__code_path,'FeatureEngineering')
docker_images['FeatureEngineering'] = 'featureengineering'+'_'+model_name.lower()+':'+model_version
createDockerImage(model_name,model_version,'featureengineering',featureengineering_addr)
from os import listdir
arr = [filename for filename in os.listdir(mlaac__code_path) if filename.startswith("ModelTraining")]
docker_training_images = []
for x in arr:
dockertraing={}
dockertraing['Training'] = str(x).lower()+'_'+model_name.lower()+':'+model_version
docker_training_images.append(dockertraing)
training_addri = os.path.join(mlaac__code_path,x)
createDockerImage(model_name,model_version,str(x).lower(),training_addri)
docker_images['ModelTraining'] = docker_training_images
docker_images['ModelRegistry'] = 'modelregistry'+'_'+model_name.lower()+':'+model_version
deploy_addr = os.path.join(mlaac__code_path,'ModelRegistry')
createDockerImage(model_name,model_version,'modelregistry',deploy_addr)
docker_images['ModelServing'] = 'modelserving'+'_'+model_name.lower()+':'+model_version
deploy_addr = os.path.join(mlaac__code_path,'ModelServing')
createDockerImage(model_name,model_version,'modelserving',deploy_addr)
outputjsonFile = os.path.join(mlaac__code_path,'dockerlist.json')
with open(outputjsonFile, 'w') as f:
json.dump(docker_images, f)
f.close()
output = {'Status':'Success','Msg':outputjsonFile}
output = json.dumps(output)
print("aion_build_container:",output)<s> import docker
import docker
import json
import logging
def read_json(file_path):
data = None
with open(file_path,'r') as f:
data = json.load(f)
return data
def run_pipeline(inputconfig):
inputconfig = json.loads(inputconfig)
logfilepath = inputconfig['logfilepath']
logging.basicConfig(level=logging.INFO,filename =logfilepath)
usecasename = inputconfig['usecase']
logging.info("UseCaseName :"+str(usecasename))
version = inputconfig['version']
logging.info("version :"+str(version))
config = inputconfig['dockerlist']
persistancevolume = inputconfig['persistancevolume']
logging.info("PersistanceVolume :"+str(persistancevolume))
datasetpath = inputconfig['datasetpath']
logging.info("DataSet Path :"+str(datasetpath))
config = read_json(config)
client = docker.from_env()
inputconfig = {'modelName':usecasename,'modelVersion':str(version),'dataLocation':datasetpath}
inputconfig = json.dumps(inputconfig)
inputconfig = inputconfig.replace('"', '\\\\"')
logging.info("===== Model Monitoring Container Start =====")
outputStr = client.containers.run(config['ModelMonitoring'],'python code.py -i'+datasetpath,volumes=[persistancevolume+':/aion'])
outputStr = outputStr.decode('utf-8')
logging.info('ModelMonitoring: '+str(outputStr))
print('ModelMonitoring: '+str(outputStr))
logging.info("===== ModelMonitoring Stop =====")
logging.info("===== Data Ingestion Container Start =====")
outputStr = client.containers.run(config['DataIngestion'],'python code.py',volumes=[persistancevolume+':/aion'])
outputStr = outputStr.decode('utf-8')
logging.info('DataIngestion: '+str(outputStr))
print('DataIngestion: '+str(outputStr))
logging.info("===== Data Ingestion Container Stop =====")
outputStr = outputStr.strip()
decoded_data = json.loads(outputStr)
status = decoded_data['Status']
if status != 'Success':
output = {'Status':'Error','Msg':'Data Ingestion Fails'}
logging.info("===== Transformation Container Start =====")
outputStr = client.containers.run(config['DataTransformation'],'python code.py',volumes=[persistancevolume+':/aion'])
outputStr = outputStr.decode('utf-8')
logging.info('Data Transformations: '+str(outputStr))
print('Data Transformations: '+str(outputStr))
logging.info("===== Transformation Container Done =====")
outputStr = outputStr.strip()
decoded_data = json.loads(outputStr)
status = decoded_data['Status']
if status != 'Success':
output = {'Status':'Error','Msg':'Data Transformations Fails'}
logging.info("===== Feature Engineering Container Start =====")
outputStr = client.containers.run(config['FeatureEngineering'],'python code.py',volumes=[persistancevolume+':/aion'])
outputStr = outputStr.decode('utf-8')
logging.info('FeatureEngineering: '+str(outputStr))
print('FeatureEngineering: '+str(outputStr))
logging.info("===== Feature Engineering Container Done =====")
outputStr = outputStr.strip()
decoded_data = json.loads(outputStr)
status = decoded_data['Status']
modeltraining = config['ModelTraining']
for mt in modeltraining:
logging.info("===== Training Container Start =====")
outputStr = client.containers.run(mt['Training'],'python code.py',volumes=[persistancevolume+':/aion'])
outputStr = outputStr.decode('utf-8')
logging.info('ModelTraining: '+str(outputStr))
print('ModelTraining: '+str(outputStr))
logging.info("===== Training Container Done =====")
outputStr = outputStr.strip()
try:
decoded_data = json.loads(outputStr)
status = decoded_data['Status']
except Exception as inst:
logging.info(inst)
logging.info("===== Model Registry Start =====")
outputStr = client.containers.run(config['ModelRegistry'],'python code.py',volumes=[persistancevolume+':/aion'])
outputStr = outputStr.decode('utf-8')
logging.info('ModelRegistry: '+str(outputStr))
print('ModelRegistry: '+str(outputStr))
logging.info("===== ModelRegistry Done =====")
logging.info("===== ModelServing Start =====")
outputStr = client.containers.run(config['ModelServing'],'python code.py',volumes=[persistancevolume+':/aion'])
outputStr = outputStr.decode('utf-8')
logging.info('Prediction: '+str(outputStr))
print('Prediction: '+str(outputStr))
logging.info("===== ModelServing Done =====") <s> import os
import os
import sys
import json
from pathlib import Path
import subprocess
import shutil
import argparse
def create_and_save_yaml(git_storage_path, container_label,usecasepath):
file_name_prefix = 'gh-acr-'
yaml_file = f"""\\
name: gh-acr-{container_label}
on:
push:
branches: main
paths: {container_label}/**
workflow_dispatch:
jobs:
gh-acr-build-push:
runs-on: ubuntu-latest
steps:
- name: 'checkout action'
uses: actions/checkout@main
- name: 'azure login'
uses: azure/login@v1
with:
creds: ${{{{ secrets.AZURE_CREDENTIALS }}}}
- name: 'build and push image'
uses: azure/docker-login@v1
with:
login-server: ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}
username: ${{{{ secrets.REGISTRY_USERNAME }}}}
password: ${{{{ secrets.REGISTRY_PASSWORD }}}}
- run: |
docker build ./{container_label}/ModelMonitoring -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelmonitoring:{container_label}
docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelmonitoring:{container_label}
docker build ./{container_label}/DataIngestion -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/dataingestion:{container_label}
docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/dataingestion:{container_label}
docker build ./{container_label}/DataTransformation -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/datatransformation:{container_label}
docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/datatransformation:{container_label}
docker build ./{container_label}/FeatureEngineering -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/featureengineering:{container_label}
docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/featureengineering:{container_label}
docker build ./{container_label}/ModelRegistry -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelregistry:{container_label}
docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelregistry:{container_label}
docker build ./{container_label}/ModelServing -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelserving:{container_label}
docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelserving:{container_label}
"""
arr = [filename for filename in os.listdir(usecasepath) if filename.startswith("ModelTraining")]
for x in arr:
yaml_file+=' docker build ./'+container_label+'/'+x+' -t ${{ secrets.REGISTRY_LOGIN_SERVER }}/'+x.lower()+':'+container_label
yaml_file+='\\n'
yaml_file+=' docker push ${{ secrets.REGISTRY_LOGIN_SERVER }}/'+x.lower()+':'+container_label
yaml_file+='\\n'
with open(Path(git_storage_path)/(file_name_prefix + container_label + '.yaml'), 'w') as f:
f.write(yaml_file)
def run_cmd(cmd):
try:
subprocess.check_output(cmd, stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
if e.stderr:
if isinstance(e.stderr, bytes):
err_msg = e.stderr.decode(sys.getfilesystemencoding())
else:
err_msg = e.stderr
elif e.output:
if isinstance(e.output, bytes):
err_msg = e.output.decode(sys.getfilesystemencoding())
else:
err_msg = e.output
else:
err_msg = str(e)
return False, err_msg
return True, ""
def validate_config(config):
non_null_keys = ['url','username', 'token', 'location', 'gitFolderLocation', 'email', 'modelName']
missing_keys = [k for k in non_null_keys if k not in config.keys()]
if missing_keys:
raise ValueError(f"following fields are missing in config file: {missing_keys}")
for k,v in config.items():
if k in non_null_keys and not v:
raise ValueError(f"Please provide value for '{k}' in config file.")
def upload(config):
validate_config(config)
url_type = config.get('url_type','https')
if url_type == 'https':
https_str = "https://"
url = https_str + config['username'] + ":" + config['token'] + "@" + config['url'][len(https_str):]
else:
url = config['url']
model_location = Path(config['location'])
git_folder_location = Path(config['gitFolderLocation'])
git_folder_location.mkdir(parents=True, exist_ok=True)
(git_folder_location/'.github'/'workflows').mkdir(parents=True, exist_ok=True)
if not model_location.exists():
raise ValueError('Trained model data not found')
os.chdir(str(git_folder_location))
(git_folder_location/config['modelName']).mkdir(parents=True, exist_ok=True)
shutil.copytree(model_location, git_folder_location/config['modelName'], dirs_exist_ok=True)
create_and_save_yaml((git_folder_location/'.github'/'workflows'), config['modelName'],config['location'])
if (Path(git_folder_location)/'.git').exists():
first_upload = False
else:
first_upload = True
if first_upload:
cmd = ['git','init']
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
cmd = ['git','config','user.name',config['username']]
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
cmd = ['git','config','user.email',config['email']]
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
cmd = ['git','add', '-A']
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
cmd = ['git','commit','-m',f"commit {config['modelName']}"]
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
cmd = ['git','branch','-M','main']
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
if first_upload:
cmd = ['git','remote','add','origin', url]
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
cmd = ['git','push','-f','-u','origin', 'main']
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
else:
cmd = ['git','push']
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
    return json.dumps({'Status':'SUCCESS'})
if __name__ == '__main__':
try:
if shutil.which('git') is None:
raise ValueError("git is not installed on this system")
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', help='Config file location or as a string')
args = parser.parse_args()
if Path(args.config).is_file() and Path(args.config).suffix == '.json':
with open(args.config,'r') as f:
config = json.load(f)
else:
config = json.loads(args.config)
print(upload(config))
except Exception as e:
status = {'Status':'Failure','msg':str(e)}
        print(json.dumps(status))
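# --- Sample config for upload() (field names taken from validate_config above; values are placeholders) ---
# {
#     "url": "https://github.com/<org>/<repo>.git",
#     "url_type": "https",
#     "username": "git-user",
#     "token": "<personal-access-token>",
#     "email": "user@example.com",
#     "modelName": "usecase1_1",
#     "location": "C:/aion/target/usecase1_1/publish/MLaC",
#     "gitFolderLocation": "C:/aion/gitupload"
# }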
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
import logging
import os
import shutil
import time
from sys import platform
from distutils.util import strtobool
# Base class for the EION configuration manager which reads the needed params from eion.json, initializes the parameter list, reads the respective params, stores them in variables and returns them to the caller function or external modules.
class OTAionConfigManager:
    # eion configuration constructor
def __init__(self):
self.log = logging.getLogger('eion')
self.data = ''
self.problemType = ''
self.basic = []
self.advance=[]
# To get parameter list of configuration module from json, this will be passed as dict{}
def getEionProfilerConfigurarion(self):
try:
if(self.advance['profiler']):
return self.advance['profiler']
else:
return('NA')
except KeyError:
return('NA')
def getAIONTestTrainPercentage(self):
try:
if(self.advance['testPercentage']):
return int(self.advance['testPercentage'])
else:
return(80)
except KeyError:
return(80)
def getAIONDataBalancingMethod(self):
try:
if(self.advance['categoryBalancingMethod']):
return self.advance['categoryBalancingMethod']
else:
return("oversample")
except KeyError:
return("oversample")
# To get parameter list of selector module params
def getEionSelectorConfiguration(self):
try:
if(self.advance['selector']):
return self.advance['selector']
else:
return('NA')
except KeyError:
return('NA')
def createDeploymentFolders(self,deployFolder,iterName,iterVersion):
usecaseFolderLocation = os.path.join(deployFolder,iterName)
os.makedirs(usecaseFolderLocation,exist_ok = True)
deployLocation = os.path.join(usecaseFolderLocation,str(iterVersion))
try:
os.makedirs(deployLocation)
except OSError as e:
shutil.rmtree(deployLocation)
time.sleep(2)
os.makedirs(deployLocation)
dataFolderLocation = os.path.join(deployLocation,'data')
try:
os.makedirs(dataFolderLocation)
except OSError as e:
print("\\nDeployment Data Folder Already Exists")
logFolderLocation = os.path.join(deployLocation,'log')
try:
os.makedirs(logFolderLocation)
except OSError as e:
print("\\nLog Folder Already Exists")
etcFolderLocation = os.path.join(deployLocation,'etc')
try:
os.makedirs(etcFolderLocation)
except OSError as e:
print("\\ETC Folder Already Exists")
prodFolderLocation = os.path.join(deployLocation,'production')
os.makedirs(prodFolderLocation)
profilerFolderLocation = os.path.join(prodFolderLocation, 'profiler')
os.makedirs(profilerFolderLocation)
modelFolderLocation = os.path.join(prodFolderLocation, 'model')
os.makedirs(modelFolderLocation)
original_data_file = os.path.join(dataFolderLocation,'preprocesseddata.csv')
profiled_data_file = os.path.join(dataFolderLocation,'postprocesseddata.csv')
trained_data_file = os.path.join(dataFolderLocation,'trainingdata.csv')
predicted_data_file = os.path.join(dataFolderLocation,'predicteddata.csv')
logFileName=os.path.join(logFolderLocation,'model_training_logs.log')
outputjsonFile=os.path.join(deployLocation,'etc','output.json')
return(deployLocation,dataFolderLocation,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,logFileName,outputjsonFile)
# To get parameter list of learner module params
def getEionLearnerConfiguration(self):
try:
if(self.advance['onlinelearner_config']):
mllearner_config = self.advance['mllearner_config']
if 'categoryBalancingMethod' not in mllearner_config:
mllearner_config['categoryBalancingMethod'] = 'oversample'
if 'testPercentage' not in mllearner_config:
mllearner_config['testPercentage'] = 20
if 'missingTargetCategory' not in mllearner_config:
mllearner_config['missingTargetCategory'] = ''
return mllearner_config
else:
return('NA')
except KeyError:
return('NA')
except Exception as inst:
self.log.info( '\\n-----> getEionLearnerConfiguration failed!!!.'+str(inst))
return('NA')
def gettimegrouper(self):
try:
if(self.basic['timegrouper']):
return self.basic['timegrouper']
else:
return 'NA'
except:
return 'NA'
def getgrouper(self):
try:
if(self.basic['group']):
return self.basic['group']
else:
return 'NA'
except:
return 'NA'
def getfilter(self):
try:
if(self.basic['filter']):
return self.basic['filter']
else:
return 'NA'
except:
return 'NA'
def getModulesDetails(self):
problem_type = self.problemType
visualizationstatus = self.getEionVisualizationStatus()
profiler_status = self.getEionProfilerStatus()
selector_status = self.getEionSelectorStatus()
learner_status = self.mllearner
targetFeature = self.getTargetFeatures()
deploy_status = self.getEionDeploymentStatus()
if learner_status:
if(problem_type == 'NA'):
learner_status = True
elif(problem_type.lower() in ['classification','regression']):
learner_status = True
else:
learner_status = False
return problem_type,targetFeature,profiler_status,selector_status,learner_status,visualizationstatus,deploy_status
def __get_true_option(self, d, default_value=None):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
def getAlgoName(self, problem_type=None):
if problem_type == None:
problem_type = self.__get_true_option(self.basic['algorithms'])
return self.__get_true_option(self.basic['algorithms'][problem_type])
def getScoringCreteria(self):
return (self.scoringCreteria)
def getEionDeployerConfiguration(self):
try:
if(self.advance['deployer']):
return self.advance['deployer']
else:
return('NA')
except KeyError:
return('NA')
def getAIONRemoteTraining(self):
try:
if(self.advance['remoteTraining']):
self.advance['remoteTraining']['Enable'] = strtobool(self.advance['remoteTraining'].get('Enable', 'False'))
return self.advance['remoteTraining']
else:
remoteTraining = {}
remoteTraining['Enable'] = False
remoteTraining['server'] = None
remoteTraining['ssh'] = None
return(remoteTraining)
except KeyError:
remoteTraining = {}
remoteTraining['Enable'] = False
remoteTraining['server'] = None
remoteTraining['ssh'] = None
return(remoteTraining)
def getEionVisualizationStatus(self):
return(True)
def getEionVisualizationConfiguration(self):
try:
if(self.advance['visualization_settings']):
return(self.advance['visualization_settings'])
else:
return('NA')
except KeyError:
return('NA')
def getEionBatchLearningStatus(self):
try:
if(self.basic['output']['batchLearning']):
return(self.basic['output']['batchLearning'])
else:
return('disable')
except KeyError:
return('disable')
def getEionProblemType(self):
try:
analysis_type = self.basic['analysisType']
self.problemType = ''
for key in analysis_type.keys():
if analysis_type[key] == 'True':
self.problemType = key
break
if self.problemType:
return self.problemType
else:
return('NA')
except KeyError:
return('NA')
def getEionProfilerStatus(self):
try:
if(self.basic['output']['profilerStage']):
return(self.basic['output']['profilerStage'])
else:
return('false')
except KeyError:
return('false')
#To get eion selector module status (enable/disable/none)
def getEionSelectorStatus(self):
try:
if(self.basic['output']['selectorStage']):
return(self.basic['output']['selectorStage'])
else:
return('disable')
except KeyError:
return('disable')
def getEionDeploymentStatus(self):
try:
if(self.basic['output']['deploymentStage']):
return(self.basic['output']['deploymentStage'])
else:
return(False)
except KeyError:
return(False)
def getEionLearnerModelParams(self,modelType):
try:
numberofModels = 0
ml_algorithm_filename = ''
if(modelType == 'classification'):
requiredalgo = 'classifierModelParams'
elif(modelType == 'regression'):
requiredalgo = 'regressorModelParams'
learnerconfig = self.advance['onlinelearner_config']
selectedMLModel = self.mlmodels
modelParams = []
modelList=[]
if 'modelParams' in learnerconfig:
modelParams = learnerconfig['modelParams']
if(str(type(modelParams)) != "<class 'dict'>"):
modelParams = []
elif(len(modelParams) == 0):
modelParams = []
if(len(modelParams) == 0):
if 'modelparamsfile' in learnerconfig:
if(learnerconfig['modelparamsfile'] != ""):
ml_algorithm_filename = learnerconfig['modelparamsfile']
if(ml_algorithm_filename == ''):
ml_algorithm_filename = os.path.dirname(os.path.abspath(__file__))+'/ML_Defaults.json'
modelParams = json.loads(open(ml_algorithm_filename).read())
if requiredalgo in modelParams:
modelParams = modelParams[requiredalgo]
if selectedMLModel != '':
modelList = selectedMLModel.split(",")
modelList = list(map(str.strip, modelList))
                    for mod in list(modelList):  # iterate over a copy so removal during the loop is safe
if mod not in modelParams:
self.log.info("'"+mod+"' Not Available for Particular Problem Type")
modelList.remove(mod)
else:
modelList = list(modelParams.keys())
#modelList = selectedMLModel.split(",")
if(len(modelList) ==0):
modelList = list(modelParams.keys())
return(modelParams,modelList)
except KeyError:
modelParams = []
modelList=[]
return(modelParams,modelList)
def getTargetFeatures(self):
try:
if(self.basic['targetFeature']):
return(self.basic['targetFeature'])
else:
return('')
except KeyError:
return('')
    def getModelFeatures(self):
try:
if(self.basic['trainingFeatures']):
modFeatures = self.basic['trainingFeatures']
modFeatures = modFeatures.split(",")
modFeatures = list(map(str.strip, modFeatures))
modFeatures = ",".join([modf for modf in modFeatures])
return(modFeatures)
else:
return('NA')
except KeyError:
return('NA')
def getFolderSettings(self):
try:
if(self.basic['folderSettings']):
return(self.basic['folderSettings'])
else:
return('NA')
except KeyError:
return('NA')
def getAIONLocationSettings(self):
self.iter_name = self.basic['modelName']
self.iteration_version = self.basic['modelVersion']
if(self.basic['dataLocation']):
dataLocation = self.basic['dataLocation']
else:
dataLocation = 'NA'
if(self.basic['deployLocation']):
deployLocation = self.basic['deployLocation']
else:
deployLocation = 'NA'
try:
if 'csv_settings' in self.basic:
csv_setting = self.basic['csv_settings']
if 'delimiters' in csv_setting:
delimiter = csv_setting['delimiters']
if delimiter.lower() == 'tab':
delimiter = '\\t'
elif delimiter.lower() == 'semicolon':
delimiter = ';'
elif delimiter.lower() == 'comma':
delimiter = ','
elif delimiter.lower() == 'space':
delimiter = ' '
elif delimiter.lower() == 'other':
if 'other' in csv_setting:
delimiter = csv_setting['other']
else:
delimiter = ','
else:
delimiter = ','
else:
delimiter = ','
if 'textqualifier' in csv_setting:
textqualifier = csv_setting['textqualifier']
else:
textqualifier = '"'
else:
delimiter = ','
textqualifier = '"'
except KeyError:
delimiter = ','
textqualifier = '"'
return(self.iter_name,self.iteration_version,dataLocation,deployLocation,delimiter,textqualifier)
def getFeatures(self):
try:
if(self.basic['dateTimeFeature']):
dtFeatures = self.basic['dateTimeFeature']
dtFeatures = dtFeatures.split(",")
dtFeatures = list(map(str.strip, dtFeatures))
dtFeatures = ",".join([dtf for dtf in dtFeatures])
else:
dtFeatures = 'NA'
except KeyError:
dtFeatures = 'NA'
try:
if(self.basic['indexFeature']):
iFeatures = self.basic['indexFeature']
iFeatures = iFeatures.split(",")
iFeatures = list(map(str.strip, iFeatures))
iFeatures = ",".join([dif for dif in iFeatures])
else:
iFeatures = 'NA'
except KeyError:
iFeatures = 'NA'
try:
if(self.basic['trainingFeatures']):
modFeatures = self.basic['trainingFeatures']
modFeatures = modFeatures.split(",")
modFeatures = list(map(str.strip, modFeatures))
modFeatures = ",".join([modf for modf in modFeatures])
else:
modFeatures = 'NA'
except KeyError:
modFeatures = 'NA'
return(dtFeatures,iFeatures,modFeatures)
def setModels(self):
try:
analysis_type = self.basic['analysisType']
#print(analysis_type)
self.problemType = ''
for key in analysis_type.keys():
if analysis_type[key] == 'True':
self.problemType = key
break
if self.problemType not in ['inputDrift','outputDrift']:
conf_algorithm = self.basic['algorithms'][self.problemType]
else:
conf_algorithm = {}
self.mlmodels=''
self.dlmodels=''
self.scoringCreteria = 'NA'
if self.problemType in ['classification','regression']:
scorCre = self.basic['scoringCriteria'][self.problemType]
for key in scorCre.keys():
if scorCre[key] == 'True':
self.scoringCreteria = key
break
#print(self.problemType)
#print(self.scoringCreteria)
if self.scoringCreteria == 'Mean Squared Error':
self.scoringCreteria = 'MSE'
if self.scoringCreteria == 'Root Mean Squared Error':
self.scoringCreteria = 'RMSE'
if self.scoringCreteria == 'Mean Absolute Error':
self.scoringCreteria = 'MAE'
if self.scoringCreteria == 'R-Squared':
self.scoringCreteria = 'R2'
if self.problemType in ['classification','regression']:
for key in conf_algorithm.keys():
if conf_algorithm[key] == 'True':
if self.mlmodels != '':
self.mlmodels += ','
self.mlmodels += key
else:
for key in conf_algorithm.keys():
if conf_algorithm[key] == 'True':
if self.mlmodels != '':
self.mlmodels += ','
self.mlmodels += key
self.mllearner = False
if self.mlmodels != '':
self.mllearner = True
return('done')
except KeyError:
return('NA')
def readConfigurationFile(self,path):
with open(path, 'rb') as data_file:
try:
self.data = json.load(data_file) #loading json object as python dictionary
#print(self.data)
self.basic = self.data['basic']
self.advance = self.data['advance']
problemType = self.setModels()
if(self.basic['output']['profilerStage']):
if(str(type(self.basic['output']['profilerStage'])) != "<class 'str'>"):
msg = "JSON Validation Fail: Profiling Should be String and value should be either enable or disable"
self.log.info(msg)
return False,msg
if((self.basic['output']['profilerStage'].lower() == 'true') & ('profiler' not in self.advance)):
msg = "JSON Validation Fail: Profiler Configuration Not Found in Advance JSON"
self.log.info(msg)
return False,msg
if(str(type(self.advance['profiler'])) != "<class 'dict'>"):
msg = "JSON Validation Fail: Error: Profiler Configuration Syntax"
self.log.info(msg)
return False,msg
if((self.basic['output']['profilerStage'].lower() != 'true') & (self.basic['output']['profilerStage'].lower() != 'false')):
msg = "JSON Validation Fail: Profiling is Not defined Correctly, it should be either enable or disable"
self.log.info(msg)
return False,msg
if(self.basic['output']['selectorStage']):
if(str(type(self.basic['output']['selectorStage'])) != "<class 'str'>"):
msg = "JSON Validation Fail: Selection Should be String and value should be either enable or disable"
self.log.info(msg)
return False,msg
if((self.basic['output']['selectorStage'].lower() == 'true') & ('selector' not in self.advance)):
msg = "JSON Validation Fail: Selector Configuration Not Found"
self.log.info(msg)
return False,msg
if((self.basic['output']['selectorStage'].lower() != 'true') & (self.basic['output']['selectorStage'].lower() != 'false')):
msg = "JSON Validation Fail:: Selection is Not defined Correctly, it should be either enable or disable"
self.log.info(msg)
return False,msg
if(str(type(self.advance['selector'])) != "<class 'dict'>"):
msg = "JSON Validation Fail: Error: Syntax of Selector"
self.log.info(msg)
return False,msg
if 'dataLocation' not in self.basic:
msg = "JSON Validation Fail: Data Location Not Defined"
self.log.info(msg)
return False,msg
if 'deployLocation' not in self.basic:
msg = "JSON Validation Fail: Deploy Location Not Defined"
self.log.info(msg)
return False,msg
if 'deployment' in self.basic:
if(str(type(self.basic['deployment'])) != "<class 'str'>"):
msg = "JSON Validation Fail: deployment Should be String and value should be either enable or disable"
self.log.info(msg)
return False,msg
if(self.basic['deployment'] == 'enable'):
if 'deployer' in self.advance:
if(str(type(self.advance['deployer'])) != "<class 'dict'>"):
msg = "JSON Validation Fail: deployer configuration should be nexted json object"
self.log.info(msg)
return False,msg
else:
msg = "JSON Validation Fail: deployer configuration is missing"
self.log.info(msg)
return False,msg
except ValueError as e:
print("Error"+str(e))
return False,e
return True,'Good'
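# --- Illustrative usage sketch (not part of the original module) ---
# A minimal driver for OTAionConfigManager, assuming an eion-style JSON file
# with 'basic' and 'advance' sections. The path below is a placeholder.
if __name__ == '__main__':
    configObj = OTAionConfigManager()
    valid, msg = configObj.readConfigurationFile('config/eion.json')   # placeholder path
    if valid:
        name, version, dataLocation, deployLocation, delimiter, textqualifier = configObj.getAIONLocationSettings()
        print(name, version, dataLocation, deployLocation)
    else:
        print('Configuration validation failed:', msg)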
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
def get_true_option(d, default_value=None):
if isinstance(d, dict):
for k, v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
def get_true_options(d):
options = []
if isinstance(d, dict):
for k, v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
options.append(k)
return options
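# Example (illustrative): get_true_option({'classification': 'True', 'regression': 'False'})
# returns 'classification', while get_true_options returns every such key as a list.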
def check_datetime(config):
dateTime = config['basic']['dateTimeFeature']
if dateTime == '' or dateTime.lower()=='na':
return False
return True
def check_dtype(d):
flag= 1
for item in d:
if item["type"].lower() != "text" and item["type"].lower() != "index":
flag = 0
break
return flag
def check_text(d): #task 12627
flag= 0
for item in d:
if item["type"].lower() == "text":
flag = 1
break
return flag
def check_labelencoding(ftr_dict_list, target_ftr):
for ftr_dict in ftr_dict_list:
        if ftr_dict['feature']!=target_ftr and ftr_dict['type'].lower()=='categorical' and ftr_dict['categoryEncoding'].lower()!='labelencoding':
return False
return True
class timeseries():
def __init__(self,config):
self.config=config
#task 11997
if self.config['basic']['analysisType']['timeSeriesForecasting'].lower()=='true':
self.problemType = 'timeSeriesForecasting'
elif self.config['basic']['analysisType']['timeSeriesAnomalyDetection'].lower()=='true':
self.problemType = 'timeSeriesAnomalyDetection'
def validate_basic_config(self,status='pass',msg=None):
#task 12627
date_time_status = check_datetime(self.config)
text_status = check_text(self.config['advance']['profiler']['featureDict'])
if not date_time_status and text_status:
msg = 'For time series problem,\\\\n* One feature should be in datetime format\\\\n* Text feature not supported '
return 'error', msg
elif not date_time_status:
msg = 'For time series problem, one feature should be in datetime format'
return 'error', msg
elif text_status:
msg = 'For time series problem, text feature not supported '
return 'error', msg
selected_algos = get_true_options(self.config['basic']['algorithms'][self.problemType]) #task 11997
if isinstance(self.config['basic']['targetFeature'],str):
targetFeature = list(self.config['basic']['targetFeature'].split(','))
if self.problemType=='timeSeriesForecasting': #task 11997
if len(targetFeature) > 1:
if 'ARIMA' in selected_algos:
status = 'error'
msg = "ARIMA is not supported for multilabel (target) feature"
return status, msg
if "FBPROPHET" in selected_algos:
status = 'error'
msg = "FBPROPHET is not supported for multiLabel (target) feature"
return status, msg
if 'MLP' in selected_algos:
status = 'error'
msg = "MLP is not supported for multiLabel (target) feature"
return status, msg
if len(targetFeature) == 1 and 'VAR' in selected_algos:
status = 'error'
msg = "VAR is not supported for singleLabel (target) feature"
return status, msg
elif self.problemType=='timeSeriesAnomalyDetection':
anomChecker = anomaly(self.config)
status, msg = anomChecker.validate_basic_config()
return status, msg
class anomaly():
def __init__(self,config):
self.config = config
        if self.config['basic']['analysisType']['anomalyDetection'].lower()=='true':
            self.problemType = 'anomalyDetection'
        elif self.config['basic']['analysisType']['timeSeriesAnomalyDetection'].lower()=='true': #task 11997
            self.problemType = 'timeSeriesAnomalyDetection'
def validate_basic_config(self,status='pass',msg=None):
#task 12627
date_time_status = check_datetime(self.config)
targetFeature = self.config['basic']['targetFeature']
if self.problemType=='anomalyDetection' and date_time_status:
status = 'error'
msg = 'Date feature detected. For anomaly detection on time series change problem type to Time Series Anomaly Detection or drop Date feature'
return status, msg
if targetFeature.lower()!= 'na' and targetFeature!= "" and self.config['basic']['inlierLabels'] == '':
status = 'error'
msg = 'Please provide inlier label in case of supervised anomaly detection'
return status, msg
class survival():
def __init__(self,config):
self.config = config
self.problemType= 'survivalAnalysis'
def validate_basic_config(self):
dateTimeStatus = check_datetime(self.config)
labelencoding_status = check_labelencoding(self.config['advance']['profiler']['featureDict'], self.config['basic']['targetFeature'])
if not dateTimeStatus and not labelencoding_status:
msg = 'For survival analysis problem,\\\\n* One feature should be in datetime format\\\\n* Encoding of categorical features should be of label encoding '
return 'error', msg
elif not dateTimeStatus:
msg = 'One feature should be in datetime format for survival analysis problem. Please select it from model feature'
return 'error', msg
elif not labelencoding_status:
msg = 'Categorical features are expected to be label encoded for survival analysis problem. Please select it from feature encoding'
return 'error', msg
else:
return 'pass', " "
class associationrule():
def __init__(self,config):
self.config=config
def validate_basic_config(self,status='pass', msg=None):
        assoc_cfg = self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']
        invoice_ftr = assoc_cfg['invoiceNoFeature']
        item_ftr = assoc_cfg['itemFeature']
        if invoice_ftr.lower() in ('', 'na') or item_ftr.lower() in ('', 'na'):
            return "error", "Make sure to configure invoice feature and item feature"
        elif invoice_ftr == item_ftr:
            return "error", "Invoice feature and item feature must be different; make sure they are configured correctly"
        else:
            return "pass", " "
class documentsimilarity():
def __init__(self,config):
self.config=config
def validate_basic_config(self,status='pass', msg=None):
flag = check_dtype(self.config['advance']['profiler']['featureDict'])
if flag == 1:
return "pass", " "
else:
msg="Make sure to change the feature type from Catgeory to Text and drop numerical features for document Similarity"
return "error", msg
def config_validate(path):
with open(path, 'rb') as data_file:
config = json.load(data_file)
data_file.close()
try:
problem_type = get_true_option(config['basic']['analysisType'])
status = 'pass'
msg = ''
if 'timeseries' in problem_type.lower(): #task 11997
obj = timeseries(config)
elif problem_type.lower() == 'survivalanalysis':
obj = survival(config)
elif problem_type.lower() == 'anomalydetection':
obj = anomaly(config)
elif problem_type.lower() in ['similarityidentification','contextualsearch']:
obj = documentsimilarity(config)
elif problem_type.lower() == 'recommendersystem':
if config['basic']['algorithms']['recommenderSystem']['AssociationRules-Apriori'].lower() == 'true':
obj = associationrule(config)
else:
return 'pass',""
else:
return 'pass',""
status,msg= obj.validate_basic_config()
return(status,msg)
    except Exception as e:
        print(e)
        return 'error', str(e)
def start_check(config):
return config_validate(config)
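# --- Illustrative usage sketch (not part of the original module) ---
# start_check takes the path of an AION configuration JSON (with 'basic' and
# 'advance' sections) and returns a (status, message) tuple. The path below
# is a placeholder.
if __name__ == '__main__':
    status, msg = start_check('config/AION_config.json')   # placeholder path
    print(status, msg)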
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
from pathlib import Path
from distutils.util import strtobool
class code_configure():
def __init__(self):
self.code_config = {}
self.unsupported_algo = []
self.supported_model = {"classification":{"Logistic Regression": "LogisticRegression", "Naive Bayes": "GaussianNB", "Decision Tree": "DecisionTreeClassifier", "Random Forest": "RandomForestClassifier", "Support Vector Machine": "SVC", "K Nearest Neighbors": "KNeighborsClassifier", "Gradient Boosting": "GradientBoostingClassifier", "Extreme Gradient Boosting (XGBoost)":"XGBClassifier", "Light Gradient Boosting (LightGBM)": "LGBMClassifier","Categorical Boosting (CatBoost)": "CatBoostClassifier"},
"regression":{"Linear Regression": "LinearRegression", "Lasso": "Lasso", "Ridge": "Ridge", "Decision Tree": "DecisionTreeRegressor", "Random Forest": "RandomForestRegressor", "Extreme Gradient Boosting (XGBoost)": "XGBRegressor", "Light Gradient Boosting (LightGBM)": "LGBMRegressor","Categorical Boosting (CatBoost)": "CatBoostRegressor"},"timeSeriesForecasting":{"MLP": "MLP","LSTM":"LSTM"}} #task 11997
def __get_true_option(self, d, default_value=None):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
def __get_true_options(self, d):
options = []
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
options.append(k)
return options
def __get_scoring_criteria(self, criteria):
mapping = {'Mean Squared Error':'MSE', 'Root Mean Squared Error':'RMSE','Mean Absolute Error':'MAE','R-Squared':'R2'}
if criteria in mapping.keys():
return mapping[criteria]
return criteria
def __get_feature_selector(self, selector_config):
feature_selector = []
if self.__get_true_option(selector_config['selectionMethod']) == 'featureSelection':
feature_selector = self.__get_true_options(selector_config['featureSelection'])
return feature_selector
def __get_feature_reducer(self, selector_config):
feature_reducer = ""
if self.__get_true_option(selector_config['selectionMethod']) == 'featureEngineering':
feature_reducer = self.__get_true_option(selector_config['featureEngineering'],'pca').lower()
return feature_reducer
def __getOptimizationParam(self, param_config):
param_dict = {}
param_dict['iterations'] = int(param_config['iterations'])
param_dict['trainTestCVSplit'] = int(param_config['trainTestCVSplit'])
param_dict['geneticparams'] = param_config['geneticparams']
return param_dict
def add_model(self, model_name, config):
if not self.unsupported_algo:
self.code_config["algorithms"][model_name] = config.copy()
def update_config(self, key, value):
self.code_config[key] = value
def save_config(self, file_path):
if Path(file_path).is_dir():
file_path = Path(file_path)/'etc/code_config.json'
with open(file_path, "w") as f:
if not self.unsupported_algo:
json.dump(self.code_config, f, indent=4)
else:
if 'ensemble' in self.unsupported_algo:
json.dump({"Status":"Failure","msg":"Ensemble is not supported","error":"Ensemble is not supported"}, f) # keep error key
elif 'text_features' in self.unsupported_algo:
json.dump({"Status":"Failure","msg":"Text feature processing is not supported","error":"Text feature processing is not supported"}, f) # keep error key
else:
json.dump({"Status":"Failure","msg":f"Unsupported model {self.unsupported_algo}","error":f"Unsupported model {self.unsupported_algo}"}, f) # keep error key
def __is_algo_supported(self, config):
problem_type = self.__get_true_option(config['basic']['analysisType'])
if problem_type not in self.supported_model.keys():
self.unsupported_algo = [problem_type]
return False
algos = config['basic']['algorithms'][problem_type]
algos = self.__get_true_options(algos)
self.unsupported_algo = [x for x in algos if x not in self.supported_model[problem_type].keys()]
if self.unsupported_algo:
return False
return True
def create_config(self, config):
if isinstance(config, str):
with open(config,'r') as f:
config = json.load(f)
problem_type = self.__get_true_option(config['basic']['analysisType'])
self.code_config["problem_type"] = problem_type.lower()
if not self.__is_algo_supported(config):
return
if 'ensemble' in config['advance']['mllearner_config']:
if config['advance']['mllearner_config']['ensemble'] == 'enable':
self.unsupported_algo = ['ensemble']
return
self.code_config["modelName"] = config['basic']['modelName']
self.code_config["modelVersion"] = config['basic']['modelVersion']
if config['basic']['folderSettings']['fileType'].lower() == 'url':
self.code_config["dataLocation"] = config['basic']['folderSettings']['labelDataFile']
else:
self.code_config["dataLocation"] = config['basic']['dataLocation']
self.code_config["target_feature"] = config['basic']['targetFeature']
trainingfeatures = config['basic']['trainingFeatures'].split(',')
datetimeFeature = list(map(str.strip, config['basic']['dateTimeFeature'].split(',')))
for dtfeature in datetimeFeature:
if dtfeature in trainingfeatures:
trainingfeatures.remove(dtfeature)
indexFeature = list(map(str.strip, config['basic']['indexFeature'].split(',')))
for dtfeature in indexFeature:
if dtfeature in trainingfeatures:
trainingfeatures.remove(dtfeature)
self.code_config["selected_features"] = trainingfeatures
self.code_config["dateTimeFeature"] = datetimeFeature
self.code_config["pro |
filer"] = config['advance']['profiler']
self.code_config["feature_selector"]= self.__get_feature_selector(config['advance']['selector'])
self.code_config["feature_reducer"]= self.__get_feature_reducer(config['advance']['selector'])
self.code_config["corr_threshold"]= float(config['advance']['selector']['statisticalConfig'].get('correlationThresholdTarget',0.85))
self.code_config["var_threshold"]= float(config['advance']['selector']['statisticalConfig'].get('varianceThreshold',0.01))
self.code_config["pValueThreshold"]= float(config['advance']['selector']['statisticalConfig'].get('pValueThresholdTarget',0.04))
self.code_config["n_components"]= int(config['advance']['selector']['featureEngineering']['numberofComponents'])
self.code_config["balancingMethod"] = config['advance']['categoryBalancingMethod']
self.code_config["test_ratio"] = int(config['advance']['testPercentage'])/100
#self.code_config["scoring_criteria"] = "accuracy"
if self.code_config["problem_type"] in ['classification','regression']:
self.code_config["algorithms"] = {}
else:
algo = self.__get_true_option(config['basic']['algorithms'][problem_type])
self.code_config["algorithms"] = {algo: config['advance'][problem_type]['modelParams'][algo]} #task 11997
self.code_config["scoring_criteria"] = self.__get_scoring_criteria(self.__get_true_option(config['basic']["scoringCriteria"][problem_type]))
if problem_type.lower() == 'timeseriesforecasting': #task 11997
self.code_config["lag_order"] = self.code_config["algorithms"][algo]["lag_order"]
self.code_config["noofforecasts"] = config["basic"]["noofforecasts"]
self.code_config["target_feature"] = config['basic']['targetFeature'].split(',')
self.code_config["optimization"] = config['advance']['mllearner_config']['optimizationMethod']
self.code_config["optimization_param"] = self.__getOptimizationParam(config['advance']['mllearner_config']['optimizationHyperParameter'])
if __name__ == '__main__':
codeConfigure = code_configure()
codeConfigure.create_config("C:\\\\Users\\\\vashistah\\\\AppData\\\\Local\\\\HCLT\\\\AION\\\\config\\\\AION_1668151242.json")
codeConfigure.save_config(r"C:\\Users\\vashistah\\AppData\\Local\\HCLT\\AION\\target\\AION_57_ts_1")<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
def getDebiasingDetail(self):
try:
if(self.advance['profiler']['deBiasing']):
dlconfig = self.advance['profiler']['deBiasing']
return dlconfig
else:
return('NA')
except KeyError:
return('NA')
def getFirstDocumentFeature(self):
if 'firstDocFeature' in self.basic:
firstDocFeature = self.basic['algorithms']['recommenderSystem']['textSimilarityConfig']['baseFeature']
else:
firstDocFeature = ''
return(firstDocFeature)
def getSecondDocumentFeature(self):
if 'secondDocFeature' in self.basic:
secondDocFeature = self.basic['algorithms']['recommenderSystem']['textSimilarityConfig']['comparisonFeature']
else:
secondDocFeature = ''
return(secondDocFeature)
def getEionInliers(self):
if 'inlierLabels' in self.basic:
self.inlierLabels = self.basic['inlierLabels']
else:
self.inlierLabels = 'NA'
return (self.inlierLabels)
def getEionProfilerConfigurarion(self):
try:
if(self.advance['profiler']):
return self.advance['profiler']
else:
return('NA')
except KeyError:
return('NA')
def getAIONTestTrainPercentage(self):
try:
return (int(self.advance.get('testPercentage',20)))
except KeyError:
return(20)
def getAIONDataBalancingMethod(self):
try:
if(self.advance['categoryBalancingMethod']):
return self.advance['categoryBalancingMethod']
else:
return("oversample")
except KeyError:
return("oversample")
def getEionSelectorConfiguration(self):
try:
if(self.advance['selector']):
return self.advance['selector']
else:
return('NA')
except KeyError:
return('NA')
def getEionDeepLearnerConfiguration(self):
try:
if(self.advance['dllearner_config']):
dlconfig = self.advance['dllearner_config']
if 'categoryBalancingMethod' not in dlconfig:
dlconfig['categoryBalancingMethod'] = ''
if 'testPercentage' not in dlconfig: #Unnati
dlconfig['testPercentage'] = 20 #Unnati
return dlconfig
else:
return('NA')
except KeyError:
return('NA')
def gettimegrouper(self):
try:
if(self.basic['timegrouper']):
return self.basic['timegrouper']
else:
return 'NA'
except:
return 'NA'
def getgrouper(self):
try:
if(self.basic['group']):
return self.basic['group']
else:
return 'NA'
except:
return 'NA'
def getfilter(self):
try:
if(self.basic['filter']):
return self.basic['filter']
else:
return 'NA'
except:
return 'NA'
def getNumberofForecasts(self):
try:
if(self.basic['noofforecasts']):
return int(self.basic['noofforecasts'])
else:
return (-1)
except:
return (-1)
##To get multivariate feature based anomaly detection status
def getMVFeaturebasedAD(self):
try:
dict_ae=self.basic['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder'] #task 11997
if(dict_ae):
return (dict_ae)
else:
return (-1)
except:
return (-1)
def getEionDeployerConfiguration(self):
try:
if(self.advance['deployer']):
return self.advance['deployer']
else:
return('NA')
except KeyError:
return('NA')
def getEionAssociationRuleConfiguration(self):
try:
if(self.advance['associationrule']):
return self.advance['associationrule']
else:
return('NA')
except KeyError:
return('NA')
def getEionObjectDetectionConfiguration(self):
try:
if(self.advance['objectDetection']):
return self.advance['objectDetection']
else:
return('NA')
except KeyError:
return('NA')
def getEionTimeSeriesConfiguration(self):
try:
if(self.advance['timeSeriesForecasting']): #task 11997
return self.advance['timeSeriesForecasting']
else:
return('NA')
except KeyError:
return('NA')
def getAIONAnomalyDetectionConfiguration(self):
try:
if(self.advance['anomalyDetection']):
return self.advance['anomalyDetection']
else:
return('NA')
except KeyError:
return('NA')
def getAIONTSAnomalyDetectionConfiguration(self): #task 11997
try:
if(self.advance['timeSeriesAnomalyDetection']):
return self.advance['timeSeriesAnomalyDetection']
else:
return('NA')
except KeyError:
return('NA')
def getEionVisualizationConfiguration(self):
try:
if(self.advance['visualization_settings']):
return(self.advance['visualization_settings'])
else:
return('NA')
except KeyError:
return('NA')
def getEionRecommenderConfiguration(self):
try:
if(self.advance['recommenderparam']):
return self.advance['recommenderparam']
else:
return('NA')
except KeyError:
return('NA')
def getAionNASConfiguration(self):
try:
if(self.advance['neuralarchsearch']):
return self.advance['neuralarchsearch']
else:
return('NA')
except KeyError:
return('NA')
def getEionProfilerStatus(self):
try:
if(self.basic['output']['profilerStage']):
return(self.basic['output']['profilerStage'])
else:
return('false')
except KeyError:
return('false')
def getEionSelectorStatus(self):
try:
if(self.basic['output']['selectorStage']):
return(self.basic['output']['selectorStage'])
else:
return('disable')
except KeyError:
return('disable')
def getEionDeploymentStatus(self):
try:
if(self.basic['output']['deploymentStage']):
return(self.basic['output']['deploymentStage'])
else:
return(False)
except KeyError:
return(False)
def __get_true_option(d, default_value=None):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
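# Illustrative only (hypothetical option dicts): __get_true_option scans a dict of
# 'True'/'False' flags and returns the first key whose value is the string 'true'
# (case-insensitive) or the boolean True, e.g.
#   __get_true_option({'LogisticRegression': 'False', 'XGBoost': 'True'})  -> 'XGBoost'
#   __get_true_option({'a': 'False'}, default_value='NA')                  -> 'NA'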
def getNASStatus(self):
try:
if(self.dlmodels):
return(self.dlmodels)
else:
return('NA')
except KeyError:
return('NA')
def getTargetFeatures(self):
try:
if(self.basic['targetFeature']):
return(self.basic['targetFeature'])
else:
return('')
except KeyError:
return('')
def getFolderSettings(self):
try:
if(self.basic['folderSettings']):
return(self.basic['folderSettings'])
else:
return('NA')
except KeyError:
return('NA')
def getFilterExpression(self):
try:
if(self.basic['filterExpression']):
return (self.basic['filterExpression'])
else:
return None
except KeyError:
return None
def setModels(self):
try:
analysis_type = self.basic['analysisType']
#print(analysis_type)
self.problemType = ''
for key in analysis_type.keys():
if analysis_type[key] == 'True':
self.problemType = key
break
if self.problemType == 'summarization':
self.problemType = 'classification'
self.summarize = True
if self.problemType not in ['inputDrift','outputDrift']:
conf_algorithm = self.basic['algorithms'][self.problemType]
else:
conf_algorithm = {}
self.mlmodels=''
self.dlmodels=''
self.scoringCreteria = 'NA'
if self.problemType in ['classification','regression','survivalAnalysis','timeSeriesForecasting']: #task 11997
scorCre = self.basic['scoringCriteria'][self.problemType]
for key in scorCre.keys():
if scorCre[key] == 'True':
self.scoringCreteria = key
break
if self.problemType.lower() == 'timeseriesforecasting': #task 11997
self.mllearner=False #task 11997 removed initialising self.ml models as timeSeriesForecasting
if self.scoringCreteria == 'Mean Squared Error':
self.scoringCreteria = 'MSE'
if self.scoringCreteria == 'Root Mean Squared Error':
self.scoringCreteria = 'RMSE'
if self.scoringCreteria == 'Mean Absolute Error':
self.scoringCreteria = 'MAE'
if self.scoringCreteria == 'R-Squared':
self.scoringCreteria = 'R2'
if self.problemType in ['similarityIdentification','contextualSearch']:
self.scoringCreteria = __get_true_option(self.basic['scoringCriteria'][self.problemType], "Cosine Similarity")
if self.problemType in ['classification','regression']:
for key in conf_algorithm.keys():
if conf_algorithm[key] == 'True':
if key not in ['Recurrent Neural Network','Convolutional Neural Network (1D)','Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)','GoogleModelSearch_DNN']:
if self.mlmodels != '':
self.mlmodels += ','
self.mlmodels += key
else:
if self.dlmodels != '':
self.dlmodels += ','
self.dlmodels += key
elif self.problemType in ['videoForecasting','imageClassification','objectDetection']:
for key in conf_algorithm.keys():
if conf_algorithm[key] == 'True':
if self.dlmodels != '':
self.dlmodels += ','
self.dlmodels += key
elif self.problemType == 'recommenderSystem':
problem_model = ''
for key in conf_algorithm.keys():
if key not in ['itemRatingConfig','textSimilarityConfig']:
if conf_algorithm[key] == 'True':
problem_model = key
break
if problem_model == 'ItemRating':
self.mlmodels = 'SVD'
elif problem_model == 'AssociationRules-Apriori':
self.mlmodels = 'Apriori'
self.problemType = 'AssociationRules'
elif problem_model == 'TextSimilarity-Siamese':
self.mlmodels = 'Siamese'
self.problemType = 'TextSimilarity'
else:
for key in conf_algorithm.keys():
if conf_algorithm[key] == 'True':
if self.mlmodels != '':
self.mlmodels += ','
self.mlmodels += key
self.mllearner = False
self.dllearner = False
if self.mlmodels != '':
self.mllearner = True
if self.advance['mllearner_config']['Stacking (Ensemble)'] == 'True':
self.mlmodels += ','
self.mlmodels += 'Stacking (Ensemble)'
if self.advance['mllearner_config']['Voting (Ensemble)'] == 'True':
self.mlmodels += ','
self.mlmodels += 'Voting (Ensemble)'
if self.dlmodels != '':
self.dllearner = True
return('done')
except KeyError:
return('NA')
def readConfigurationFile(self, path):
if isinstance( path, dict):
self.data = path
else:
with open(path, 'r') as data_file:
self.data = json.load(data_file) #loading json object as python dictionary
self.basic = self.data['basic']
self.advance = self.data['advance']
problemType = self.setModels()
if 'output' in self.basic:
if(self.basic['output']['profilerStage']):
if(str(type(self.basic['output']['profilerStage'])) != "<class 'str'>"):
msg = "JSON Validation Fail: Profiling Should be String and value should be either enable or disable"
self.log.info(msg)
return False,msg
if((self.basic['output']['profilerStage'].lower() == 'true') & ('profiler' not in self.advance)):
msg = "JSON Validation Fail: Profiler Configuration Not Found in Advance JSON"
self.log.info(msg)
return False,msg
if(str(type(self.advance['profiler'])) != "<class 'dict'>"):
msg = "JSON Validation Fail: Error: Profiler Configuration Syntax"
self.log.info(msg)
return False,msg
if((self.basic['output']['profilerStage'].lower() != 'true') & (self.basic['output']['profilerStage'].lower() != 'false')):
msg = "JSON Validation Fail: Profiling is Not defined Correctly, it should be either enable or disable"
self.log.info(msg)
return False,msg
if(self.basic['output']['selectorStage']):
if(str(type(self.basic['output']['selectorStage'])) != "<class 'str'>"):
msg = "JSON Validation Fail: Selection Should be String and value should be either enable or disable"
self.log.info(msg)
return False,msg
if((self.basic['output']['selectorStage'].lower() == 'true') & ('selector' not in self.advance)):
msg = "JSON Validation Fail: Selector Configuration Not Found"
self.log.info(msg)
return False,msg
if((self.basic['output']['selectorStage'].lower() != 'true') & (self.basic['output']['selectorStage'].lower() != 'false')):
msg = "JSON Validation Fail:: Selection is Not defined Correctly, it should be either enable or disable"
self.log.info(msg)
return False,msg
if(str(type(self.advance['selector'])) != "<class 'dict'>"):
msg = "JSON Validation Fail: Error: Syntax of Selector"
self.log.info(msg)
return False,msg
if 'dataLocation' not in self.basic:
msg = "JSON Validation Fail: Data Location Not Defined"
self.log.info(msg)
return False,msg
if 'deployLocation' not in self.basic:
msg = "JSON Validation Fail: Deploy Location Not Defined"
self.log.info(msg)
return False,msg
if 'deployment' in self.basic:
if(str(type(self.basic['deployment'])) != "<class 'str'>"):
msg = "JSON Validation Fail: deployment Should be String and value should be either enable or disable"
self.log.info(msg)
return False,msg
if(self.basic['deployment'] == 'enable'):
if 'deployer' in self.advance:
if(str(type(self.advance['deployer'])) != "<class 'dict'>"):
msg = "JSON Validation Fail: deployer configuration should be nexted json object"
self.log.info(msg)
return False,msg
else:
msg = "JSON Validation Fail: deployer configuration is missing"
self.log.info(msg)
return False,msg
return True,'Good'
def getSurvivalEventColumn(self):
try:
if(self.advance['survival_config']):
survival_config = self.advance['survival_config']
if 'input' in survival_config:
inp = survival_config['input']
if not isinstance(inp, dict):
return None
elif 'event_col' in inp:
e = inp['event_col']
if not isinstance(e, str):
return None
return (e)
else:
return None
else:
return None
else:
return None
except KeyError:
return None
def getSurvivalDurationColumn(self):
try:
if(self.advance['survival_config']):
survival_config = self.advance['survival_config']
if 'input' in survival_config:
inp = survival_config['input']
if not isinstance(inp, dict):
return None
elif 'duration_col' in inp:
t = inp['duration_col']
if not isinstance(t, str):
return None
return (t)
else:
return None
else:
return None
else:
return None
except KeyError:
return None
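# Note: the functions in this module deliberately take `self` as their first argument;
# AionConfigManager imports this module as `cs` and delegates to it (e.g.
# `return cs.getEionInliers(self)`), so each helper reads from self.basic / self.advance.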
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
import logging
import os
import shutil
import time
import importlib
from sys import platform
from pathlib import Path
from distutils.util import strtobool
import config_manager.pipeline_config_reader as cs
# Base class for the EION configuration Manager which reads the needed params from eion.json, initializes the parameter list, reads the respective params, stores them in variables and returns them to the caller function or external modules.
class AionConfigManager:
def getDebiasingDetail(self):
return cs.getDebiasingDetail(self)
# eion configuration constructor
def __init__(self):
self.log = logging.getLogger('eion')
self.data = ''
self.problemType = ''
self.basic = []
self.advance=[]
self.summarize = False
#To get the inliner labels for eion anomaly detection
def get_text_feature(self):
self.text_features = []
feat_dict = self.advance['profiler']['featureDict']
for feat in feat_dict:
if feat.get('type') == 'text':
if feat.get('feature'):
self.text_features.append(feat['feature'])
return self.text_features
def validate_config(self):
status = True
error_id = ''
msg = ''
conversion_method = self.__get_true_option(self.advance.get('profiler',{}).get('textConversionMethod',{}))
is_text_feature = self.get_text_feature()
if is_text_feature and conversion_method.lower() == 'fasttext':
status = importlib.util.find_spec('fasttext')
if not status:
error_id = 'fasttext'
msg = 'fastText is not installed. Please install fastText'
return status,error_id, msg
def getTextlocation(self):
text_data = self.basic["dataLocation"]
return text_data
def getTextSummarize(self):
algo = self.basic['algorithms']['textSummarization']
for key in algo:
if algo[key] == 'True':
algoname = key
method = self.advance['textSummarization']['summaryLength']
for key in method:
if method[key] == 'True':
methodname = key
return algoname,methodname
def getAssociationRuleFeatures(self):
if 'invoiceNoFeature' in self.basic['algorithms']['recommenderSystem']['associationRulesConfig']:
invoiceNoFeature = self.basic['algorithms']['recommenderSystem']['associationRulesConfig']['invoiceNoFeature']
else:
invoiceNoFeature =''
if 'itemFeature' in self.basic['algorithms']['recommenderSystem']['associationRulesConfig']:
itemFeature = self.basic['algorithms']['recommenderSystem']['associationRulesConfig']['itemFeature']
else:
itemFeature =''
return invoiceNoFeature,itemFeature
def getFirstDocumentFeature(self):
return cs.getFirstDocumentFeature(self)
def getSecondDocumentFeature(self):
return cs.getSecondDocumentFeature(self)
def getEionTextSimilarityConfig(self):
return self.advance['textSimilarityConfig']
def getEionTextSummarizationConfig(self):
return self.basic['dataLocation'],self.basic['deployLocation'] ,self.basic['textSummarization']['KeyWords'],self.basic['textSummarization']['pathForKeywordFile']
def getEionInliers(self):
return cs.getEionInliers(self)
#To get the selected models for eion anomaly detection
def getEionanomalyModels(self):
self.anomalyModels = self.mlmodels
return (self.anomalyModels)
# To get parameter list of configuration module from json, this will be passed as dict{}
def getEionProfilerConfigurarion(self):
return cs.getEionProfilerConfigurarion(self)
def getAIONTestTrainPercentage(self):
return cs.getAIONTestTrainPercentage(self)
def getModelEvaluationConfig(self):
try:
return request.POST.get('mydata',{})
except Exception as e:
return({})
def getAIONDataBalancingMethod(self):
return cs.getAIONDataBalancingMethod(self)
def updateFeatureSelection(self, selectorConfig,codeConfigure,vectorizer=False):
if vectorizer:
selectorConfig['selectionMethod']['featureSelection'] = 'True'
selectorConfig['featureSelection']['allFeatures'] = 'True'
selectorConfig['featureSelection']['statisticalBased'] = 'False'
selectorConfig['featureSelection']['modelBased'] = 'False'
codeConfigure.update_config("feature_selector", ['allFeatures'])
# To get parameter list of selector module params
def getEionSelectorConfiguration(self):
return cs.getEionSelectorConfiguration(self)
def createDeploymentFolders(self,deployFolder,iterName,iterVersion):
usecase = '{}{}{}'.format(iterName, '_' if iterVersion != '' else '', iterVersion)
folders = ['data','log','model','script','etc']
skip_delete = ['log']
deployLocation = Path(deployFolder)/iterName/iterVersion
deployLocation.mkdir(parents=True, exist_ok=True)
# delete previous failed/trained use case outputs except log folder
# as logging is already enabled for current usecase
for x in deployLocation.iterdir():
if x.is_file(): # bug 13315 delete existing files
x.unlink()
elif x.is_dir():
if x.stem not in skip_delete:
shutil.rmtree( x)
for folder in folders:
(deployLocation/folder).mkdir( parents=True, exist_ok=True)
(deployLocation/'log'/'img').mkdir( parents=True, exist_ok=True)
data_location = deployLocation/'data'
paths = {
'usecase': str(deployLocation.parent),
'deploy': str(deployLocation),
'data': str(deployLocation/'data'),
'image': str(deployLocation/'log'/'img'),
}
files = {
'original': str(data_location/'preprocesseddata.csv.gz'),
'profiled': str(data_location/'postprocesseddata.csv.gz'),
'reduction': str(data_location/'reductiondata.csv'),
'trained': str(data_location/'trainingdata.csv'),
'predicted': str(data_location/'predicteddata.csv.gz'),
'logs': str(deployLocation/'log'/'model_training_logs.log'),
'output': str(deployLocation/'etc'/'output.json'),
}
return( paths['usecase'],paths['deploy'],paths['data'],paths['image'],files['original'],files['profiled'],files['trained'],files['predicted'],files['logs'],files['output'],files['reduction'])
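# For illustration, a call such as createDeploymentFolders('/opt/aion/target', 'usecase1', '1')
# (paths here are hypothetical) produces the layout below and returns the matching path/file strings:
#   /opt/aion/target/usecase1/1/
#       data/   log/img/   model/   script/   etc/
# with data files such as data/preprocesseddata.csv.gz and etc/output.json resolved under it.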
# To get parameter list of learner module params
def getEionLearnerConfiguration(self):
try:
if(self.advance['mllearner_config']):
mllearner_config = self.advance['mllearner_config']
if 'categoryBalancingMethod' not in mllearner_config:
mllearner_config['categoryBalancingMethod'] = 'oversample'
if 'testPercentage' not in mllearner_config:
mllearner_config['testPercentage'] = 20
if 'missingTargetCategory' not in mllearner_config:
mllearner_config['missingTargetCategory'] = ''
mllearner_config['modelParams']['classifierModelParams']['Deep Q Network'] = self.advance['rllearner_config']['modelParams']['classifierModelParams']['Deep Q Network']
mllearner_config['modelParams']['classifierModelParams']['Neural Architecture Search'] = self.advance['dllearner_config']['modelParams']['classifierModelParams']['Neural Architecture Search']
mllearner_config['modelParams']['classifierModelParams']['Dueling Deep Q Network'] = self.advance['rllearner_config']['modelParams']['classifierModelParams']['Dueling Deep Q Network']
mllearner_config['modelParams']['regressorModelParams']['Deep Q Network'] = self.advance['rllearner_config']['modelParams']['regressorModelParams']['Deep Q Network']
mllearner_config['modelParams']['regressorModelParams']['Dueling Deep Q Network'] = self.advance['rllearner_config']['modelParams']['regressorModelParams']['Dueling Deep Q Network']
mllearner_config['modelParams']['regressorModelParams']['Neural Architecture Search'] = self.advance['dllearner_config']['modelParams']['regressorModelParams']['Neural Architecture Search']
return mllearner_config
else:
return('NA')
except KeyError:
return('NA')
except Exception as inst:
self.log.info( '\\n-----> getEionLearnerConfiguration failed!!!.'+str(inst))
return('NA')
def getEionDeepLearnerConfiguration(self):
return cs.getEionDeepLearnerConfiguration(self)
def gettimegrouper(self):
return cs.gettimegrouper(self)
def getgrouper(self):
return cs.getgrouper(self)
def getfilter(self):
return cs.getfilter(self)
def getNumberofForecasts(self):
return cs.getNumberofForecasts(self)
##To get multivariate feature based anomaly detection status
def getMVFeaturebasedAD(self):
return cs.getMVFeaturebasedAD(self)
def getModulesDetails(self):
problem_type = self.problemType
visualizationstatus = self.getEionVisualizationStatus()
profiler_status = self.getEionProfilerStatus()
selector_status = self.getEionSelectorStatus()
learner_status = self.mllearner
deeplearner_status = self.dllearner
targetFeature = self.getTargetFeatures()
deploy_status = self.getEionDeploymentStatus()
VideoProcessing = False
similarityIdentificationStatus = False
contextualSearchStatus = False
anomalyDetectionStatus = False
if problem_type.lower() == 'survivalanalysis':
survival_analysis_status = True
selector_status = False
associationRuleStatus = 'disable'
timeseriesStatus = 'disable'
learner_status = False
deeplearner_status = False
else:
survival_analysis_status = False
if problem_type.lower() == 'textsimilarity':
selector_status = False
learner_status = False
deeplearner_status = False
timeseriesStatus = 'disable'
associationRuleStatus = 'disable'
inputDriftStatus = 'disable'
textSimilarityStatus = True
else:
textSimilarityStatus = False
if problem_type.lower() == 'inputdrift':
inputDriftStatus = True
profiler_status = False
selector_status = False
learner_status = False
deeplearner_status = False
timeseriesStatus = 'disable'
associationRuleStatus = 'disable'
deploy_status = False
visualizationstatus = False
else:
inputDriftStatus = False
if problem_type.lower() == 'outputdrift':
outputDriftStatus = True
profiler_status = False
selector_status = False
learner_status = False
deeplearner_status = False
timeseriesStatus = 'disable'
associationRuleStatus = 'disable'
deploy_status = False
visualizationstatus = False
else:
outputDriftStatus = False
if problem_type.lower() == 'recommendersystem':
recommenderStatus = True
#profiler_status = 'disable'
selector_status = False
learner_status = False
deeplearner_status = False
timeseriesStatus = 'disable'
associationRuleStatus = 'disable'
#Task 11190
visualizationstatus = False
else:
recommenderStatus = False
'''
if profiler_status.lower() == 'enable':
profiler_status = True
else:
profiler_status = False
if selector_status.lower() == 'enable':
selector_status = True
else:
selector_status = False
if visualizationstatus.lower() == 'enable':
visualizationstatus = True
else:
visualizationstatus = False
'''
if learner_status:
if(problem_type == 'NA'):
learner_status = True
elif(problem_type.lower() in ['classification','regression','clustering','anomalydetection', 'topicmodelling', 'objectdetection', 'timeseriesanomalydetection']): #task 11997
learner_status = True
else:
learner_status = False
if problem_type.lower() == 'anomalydetection' or problem_type.lower() == 'timeseriesanomalydetection': #task 11997
anomalyDetectionStatus = True
if deeplearner_status:
if(problem_type.lower() == 'na'):
deeplearner_status = True
elif(problem_type.lower() in ['classification','regression']):
deeplearner_status = True
else:
deeplearner_status = False
if(targetFeature == ''):
deeplearner_status = False
if problem_type.lower() == 'timeseriesforecasting': #task 11997
timeseriesStatus = True
profiler_status = True #task 12627
selector_status = False
learner_status = False
deeplearner_status = False
associationRuleStatus = 'disable'
else:
timeseriesStatus = False
if problem_type.lower() == 'videoforecasting':
forecastingStatus = True
timeseriesStatus = False
profiler_status = True
selector_status = False
learner_status = False
deeplearner_status = False
associationRuleStatus = 'disable'
else:
forecastingStatus = False
if problem_type.lower() == 'imageclassification':
imageClassificationStatus = True
timeseriesStatus = False
profiler_status = False
selector_status = False
learner_status = False
deeplearner_status = False
associationRuleStatus = 'disable'
else:
imageClassificationStatus = False
if problem_type.lower() == 'associationrules':
associationRuleStatus = True
timeseriesStatus = False
profiler_status = False
selector_status = False
learner_status = False
deeplearner_status = False
visualizationstatus = False
else:
associationRuleStatus = False
if problem_type.lower() == 'statetransition':
stateTransitionStatus = True
objectDetectionStatus = False
imageClassificationStatus = False
timeseriesStatus = False
profiler_status = False
selector_status = False
learner_status = False
deeplearner_status = False
associationRuleStatus = False
visualizationstatus = False
else:
stateTransitionStatus = False
if problem_type.lower() == 'objectdetection':
objectDetectionStatus = True
imageClassificationStatus = False
timeseriesStatus = False
profiler_status = False
selector_status = False
learner_status = False
deeplearner_status = False
associationRuleStatus = False
visualizationstatus = False
else:
objectDetectionStatus = False
if problem_type.lower() == 'similarityidentification':
similarityIdentificationStatus = True
objectDetectionStatus = False
imageClassificationStatus = False
timeseriesStatus = False
selector_status = False
learner_status = False
deeplearner_status = False
associationRuleStatus = False
visualizationstatus = False
self.updateEmbeddingForDocSimilarity()
else:
similarityIdentificationStatus = False
if problem_type.lower() == 'contextualsearch':
contextualSearchStatus = True
objectDetectionStatus = False
imageClassificationStatus = False
timeseriesStatus = False
selector_status = False
learner_status = False
deeplearner_status = False
associationRuleStatus = False
visualizationstatus = False
self.updateEmbeddingForContextualsearch()
else:
contextualSearchStatus = False
if problem_type.lower() == 'textsummarization':
textSummarization = True
profiler_status = False
selector_status = False
else:
textSummarization = False
'''
if deploy_status.lower() == 'enable':
deploy_status = True
else:
deploy_status = False
'''
#print(inputDriftStatus)
return problem_type,targetFeature,profiler_status,selector_status,learner_status,deeplearner_status,timeseriesStatus,textSummarization,survival_analysis_status,textSimilarityStatus,inputDriftStatus,outputDriftStatus,recommenderStatus,visualizationstatus,deploy_status,associationRuleStatus,imageClassificationStatus,forecastingStatus,objectDetectionStatus,stateTransitionStatus,similarityIdentificationStatus,contextualSearchStatus,anomalyDetectionStatus
def __get_true_option(self, d, default_value=None):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
def updateEmbeddingForDocSimilarity(self):
method = self.__get_true_option(self.basic['algorithms']['similarityIdentification'])
textConversionMethods = self.advance['profiler']['textConversionMethod']
print("------------"+method+'---------------')
for key in textConversionMethods:
if key == method:
self.advance['profiler']['textConversionMethod'][key] = "True"
else:
self.advance['profiler']['textConversionMethod'][key] = "False"
if method.lower() == 'bm25':
self.advance['profiler']['textConversionMethod']['bm25'] = "True"
def updateEmbeddingForContextualsearch(self):
method = self.__get_true_option(self.basic['algorithms']['contextualSearch'])
textConversionMethods = self.advance['profiler']['textConversionMethod']
print("------------"+method+'---------------')
for key in textConversionMethods:
if key == method:
self.advance['profiler']['textConversionMethod'][key] = "True"
else:
self.advance['profiler']['textConversionMethod'][key] = "False"
if method.lower() == 'bm25':
self.advance['profiler']['textConversionMethod']['bm25'] = "True"
def get_conversion_method(self):
return self.__get_true_option( self.advance['profiler']['textConversionMethod'])
def getAlgoName(self, problem_type=None):
if problem_type == None:
problem_type = self.__get_true_option(self.basic['algorithms'])
return self.__get_true_option(self.basic['algorithms'][problem_type])
def getScoringCreteria(self):
return self.scoringCreteria
def getVectorDBCosSearchStatus(self,problemType):
if self.basic['preprocessing'][problemType]['VectorDB'] == 'True':
return True
else:
return False
def getVectorDBFeatureDelimitInDoc(self):
return ' ~&~ '
def getEionDeployerConfiguration(self):
return cs.getEionDeployerConfiguration(self)
def getEionAssociationRuleConfiguration(self):
return cs.getEionAssociationRuleConfiguration(self)
def getEionAssociationRuleModelParams(self):
try:
associationConfg = self.advance['associationrule']
if 'modelParams' in associationConfg:
modelParams = associationConfg['modelParams']
if(str(type(modelParams)) != "<class 'dict'>"):
modelParams = []
elif(len(modelParams) == 0):
modelParams = []
if(len(modelParams) == 0):
if 'modelparamsfile' in associationConfg:
ml_algorithm_filename = associationConfg['modelparamsfile']
if(ml_algorithm_filename == ''):
ml_algorithm_filename = os.path.dirname(os.path.abspath(__file__))+'/AssciationRules_Defaults.json'
modelParams = json.loads(open(ml_algorithm_filename).read())
modelList = []
modelList = list(modelParams.keys())
return(modelParams,modelList)
except KeyError:
modelParams = []
modelList=[]
return(modelParams,modelList)
def getEionImageAugmentationConfiguration(self):
try:
enable = self.advance['ImageAugmentation'].get('Enable', "False")
keepAugImages = self.advance['ImageAugmentation'].get('KeepAugmentedImages', "False")
if enable == "True":
operations = {}
operations.update(self.advance['ImageAugmentation'].get('Noise', {}))
operations.update(self.advance['ImageAugmentation'].get('Transformation', {}))
if keepAugImages == 'True':
keepAugImages = True
if keepAugImages == 'False':
keepAugImages = False
return True,keepAugImages,{key: True if value.lower() == "true" else False for key, value in operations.items()},self.advance['ImageAugmentation'].get('configuration',{})
else:
return False,False, {},{}
except KeyError:
return False,False, {},{}
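# Example (hypothetical config): with ImageAugmentation = {'Enable': 'True', 'KeepAugmentedImages': 'False',
# 'Noise': {'gaussian': 'True'}, 'Transformation': {'flip': 'False'}} this returns
# (True, False, {'gaussian': True, 'flip': False}, <configuration dict>); the operation flags are
# converted from 'True'/'False' strings to booleans.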
def getAIONRemoteTraining(self):
try:
if(self.advance['remoteTraining']):
self.advance['remoteTraining']['Enable'] = strtobool(self.advance['remoteTraining'].get('Enable', 'False'))
return self.advance['remoteTraining']
else:
remoteTraining = {}
remoteTraining['Enable'] = False
remoteTraining['server'] = None
remoteTraining['ssh'] = None
return(remoteTraining)
except KeyError:
remoteTraining = {}
remoteTraining['Enable'] = False
remoteTraining['server'] = None
remoteTraining['ssh'] = None
return(remoteTraining)
def getEionObjectDetectionConfiguration(self):
return cs.getEionObjectDetectionConfiguration(self)
def getEionTimeSeriesConfiguration(self):
return cs.getEionTimeSeriesConfiguration(self)
def getAIONAnomalyDetectionConfiguration(self):
return cs.getAIONAnomalyDetectionConfiguration(self)
def getAIONTSAnomalyDetectionConfiguration(self):
return cs.getAIONTSAnomalyDetectionConfiguration(self)
def getEionVisualizationStatus(self):
return(True)
def getEionVisualizationConfiguration(self):
return cs.getEionVisualizationConfiguration(self)
def getEionRecommenderConfiguration(self):
return cs.getEionRecommenderConfiguration(self)
def getAionNASConfiguration(self):
return cs.getAionNASConfiguration(self)
def getEionProblemType(self):
try:
analysis_type = self.basic['analysisType']
self.problemType = ''
for key in analysis_type.keys():
if analysis_type[key] == 'True':
self.problemType = key
break
if self.problemType:
return self.problemType
else:
return('NA')
except KeyError:
return('NA')
def getEionProfilerStatus(self):
return cs.getEionProfilerStatus(self)
def getEionSelectorStatus(self):
return cs.getEionSelectorStatus(self)
def getEionDeploymentStatus(self):
return cs.getEionDeploymentStatus(self)
def getEionTimeSeriesModelParams(self):
try:
selectedMLModel = self.mlmodels
tsconfig = self.advance['timeSeriesForecasting'] #task 11997
if 'modelParams' in tsconfig:
modelParams = tsconfig['modelParams']
if(str(type(modelParams)) != "<class 'dict'>"):
modelParams = []
elif(len(modelParams) == 0):
modelParams = []
if(len(modelParams) == 0):
if 'modelparamsfile' in tsconfig:
ml_algorithm_filename = tsconfig['modelparamsfile']
if(ml_algorithm_filename == ''):
ml_algorithm_filename = os.path.dirname(os.path.abspath(__file__))+'/TS_Defaults.json'
modelParams = json.loads(open(ml_algorithm_filename).read())
#Modified getting modelParams as small letters
modelParams = {k.lower(): v for k, v in modelParams.items()}
#print("\\n modelParams: type \\n",modelParams,type(modelParams))
if selectedMLModel != '':
#if selectedMLModel.lower() != 'var':
if ('var' not in selectedMLModel.lower()):
modelList = selectedMLModel.split(",")
modelList = list(map(str.strip, modelList))
#Modified getting modelList as small letters
modelList = [strMP.lower() for strMP in modelList]
for mod in list(modelList): # iterate over a copy so items can be removed safely
if mod not in modelParams:
self.log.info("'"+mod+"' Not Available for Particular Problem Type")
modelList.remove(mod)
else:
modelList = selectedMLModel.split(",")
#Modified
modelList = [strMP.lower() for strMP in modelList]
modelList = list(map(str.strip, modelList))
else:
#Modified
modelParams = {k.lower(): v for k, v in modelParams.items()}
modelList = list(modelParams.keys())
return(modelParams,modelList)
except KeyError:
modelParams = []
modelList=[]
return(modelParams,modelList)
#NAS status
def getNASStatus(self):
return cs.getNASStatus(self)
def getEionImageLearnerModelParams(self):
try:
selectedDLModel = self.dlmodels
learnerconfig = self.advance['image_config']
modelList = selectedDLModel.split(",")
return(learnerconfig,modelList)
except KeyError:
learnerconfig = []
modelList=[]
return(learnerconfig,modelList)
def getAionObjectDetectionModelParams(self):
try:
selectedDLModel = self.dlmodels
modelList = selectedDLModel.split(",")
return(modelList)
except KeyError:
modelList=[]
return(modelList)
def getEionVideoLearnerModelParams(self):
try:
selectedDLModel = self.basic['selected_DL_Models']
learnerconfig = self.advance['video_config']
modelList = selectedDLModel.split(",")
return(learnerconfig,modelList)
except KeyError:
learnerconfig = []
modelList=[]
return(learnerconfig,modelList)
def getEionDeepLearnerModelParams(self,modelType):
try:
numberofModels = 0
dl_algorithm_filename = ''
if(modelType == 'classification'):
requiredalgo = 'classifierModelParams'
elif(modelType == 'regression'):
requiredalgo = 'regressorModelParams'
selectedmodels = 'regression'
elif(modelType == 'TextClassification'):
requiredalgo = 'classifierModelParams'
elif(modelType == 'clustering'):
requiredalgo = 'clusteringModelParams'
learnerconfig = self.advance['dllearner_config']
selectedDLModel = self.dlmodels
modelParams = []
modelList=[]
if 'modelParams' in learnerconfig:
modelParams = learnerconfig['modelParams']
if(str(type(modelParams)) != "<class 'dict'>"):
modelParams = []
elif(len(modelParams) == 0):
modelParams = []
if(len(modelParams) == 0):
if 'modelparamsfile' in learnerconfig:
if(learnerconfig['modelparamsfile'] != ""):
dl_algorithm_filename = learnerconfig['modelparamsfile']
if(dl_algorithm_filename == ''):
dl_algorithm_filename = os.path.dirname(os.path.abspath(__file__))+'/DL_Defaults.json'
modelParams = json.loads(open(dl_algorithm_filename).read())
if requiredalgo in modelParams:
modelParams = modelParams[requiredalgo]
if selectedDLModel != '':
modelList = selectedDLModel.split(",")
modelList = list(map(str.strip, modelList))
for mod in list(modelList): # iterate over a copy so items can be removed safely
if mod not in modelParams:
self.log.info("'"+mod+"' Not Available for Particular Problem Type")
modelList.remove(mod)
else:
modelList = list(modelParams.keys())
#modelParams = dict((k.lower(), v) for k, v in modelParams .items())
#modelList = selectedMLModel.split(",")
if(len(modelList) == 0):
modelList = list(modelParams.keys())
return(modelParams,modelList)
except KeyError:
modelParams = []
modelList=[]
return(modelParams,modelList)
def getEionLearnerModelParams(self,modelType):
try:
numberofModels = 0
ml_algorithm_filename = ''
if(modelType == 'classification'):
requiredalgo = 'classifierModelParams'
elif(modelType == 'regression'):
requiredalgo = 'regressorModelParams'
elif(modelType == 'TextClassification'):
requiredalgo = 'classifierModelParams'
elif(modelType == 'clustering'):
requiredalgo = 'clusteringModelParams'
elif(modelType == 'topicmodelling'):
requiredalgo = 'topicModellingParams'
learnerconfig = self.advance['mllearner_config']
selectedMLModel = self.mlmodels
modelParams = []
modelList=[]
if 'modelParams' in learnerconfig:
modelParams = learnerconfig['modelParams']
if(str(type(modelParams)) != "<class 'dict'>"):
modelParams = []
elif(len(modelParams) == 0):
modelParams = []
if(len(modelParams) == 0):
if 'modelparamsfile' in learnerconfig:
if(learnerconfig['modelparamsfile'] != ""):
ml_algorithm_filename = learnerconfig['modelparamsfile']
if(ml_algorithm_filename == ''):
ml_algorithm_filename = os.path.dirname(os.path.abspath(__file__))+'/ML_Defaults.json'
modelParams = json.loads(open(ml_algorithm_filename).read())
if requiredalgo in modelParams:
modelParams = modelParams[requiredalgo]
#modelParams = dict((k.lower(), v) for k, v in modelParams .items())
#print(modelParams)
#modelList = list(modelParams.keys())
#print("SelectedModels")
#self.log.info(selectedmodels)
#if selectedmodels in selectedMLModel:
if selectedMLModel != '':
modelList = selectedMLModel.split(",")
modelList = list(map(str.strip, modelList))
for mod in list(modelList): # iterate over a copy so items can be removed safely
if mod not in modelParams:
self.log.info("'"+mod+"' Not Available for Particular Problem Type")
modelList.remove(mod)
else:
modelList = list(modelParams.keys())
#modelList = selectedMLModel.split(",")
if(len(modelList) ==0):
modelList = list(modelParams.keys())
return(modelParams,modelList)
except KeyError:
modelParams = []
modelList=[]
return(modelParams,modelList)
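# ML_Defaults.json (and the in-config modelParams) are expected to look roughly like
# {'classifierModelParams': {'Logistic Regression': {...}, ...}, 'regressorModelParams': {...}},
# keyed first by problem type and then by algorithm name; the algorithm names shown here are
# hypothetical examples, not a definitive listing of the defaults file.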
def getTargetFeatures(self):
return cs.getTargetFeatures(self)
def getModelFeatures(self):
try:
if(self.basic['trainingFeatures']):
modFeatures = self.basic['trainingFeatures']
modFeatures = modFeatures.split(",")
modFeatures = list(map(str.strip, modFeatures))
modFeatures = ",".join([modf for modf in modFeatures])
return(modFeatures)
else:
return('NA')
except KeyError:
return('NA')
def getFolderSettings(self):
return cs.getFolderSettings(self)
def getAIONLocationSettings(self):
self.iter_name = self.basic['modelName']
self.iteration_version = self.basic['modelVersion']
if(self.basic['dataLocation']):
dataLocation = self.basic['dataLocation']
else:
dataLocation = 'NA'
if(self.basic['deployLocation']):
deployLocation = self.basic['deployLocation']
else:
deployLocation = 'NA'
try:
if 'fileSettings' in self.basic:
csv_setting = self.basic['fileSettings']
if 'delimiters' in csv_setting:
delimiter = csv_setting['delimiters']
if delimiter.lower() == 'tab' or delimiter.lower() == '\\t':
delimiter = '\\t'
elif delimiter.lower() == 'semicolon' or delimiter.lower() == ';':
delimiter = ';'
elif delimiter.lower() == 'comma' or delimiter.lower() == ',':
delimiter = ','
elif delimiter.lower() == 'space' or delimiter.lower() == ' ':
delimiter = ' '
elif delimiter.lower() == 'other':
if 'other' in csv_setting:
delimiter = csv_setting['other']
else:
delimiter = ','
elif delimiter == '':
delimiter = ','
else:
delimiter = ','
if 'textqualifier' in csv_setting:
textqualifier = csv_setting['textqualifier']
else:
textqualifier = '"'
else:
delimiter = ','
textqualifier = '"'
except KeyError:
delimiter = ','
textqualifier = '"'
return(self.iter_name,self.iteration_version,dataLocation,deployLocation,delimiter,textqualifier)
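# Delimiter resolution sketch (hypothetical fileSettings values):
#   {'delimiters': 'tab'}                  -> '\\t'
#   {'delimiters': 'semicolon'}            -> ';'
#   {'delimiters': 'other', 'other': '|'}  -> '|'
#   missing or empty                       -> ',' (default); textqualifier defaults to '"'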
def getFeatures(self):
try:
if(self.basic['dateTimeFeature']):
dtFeatures = self.basic['dateTimeFeature']
dtFeatures = dtFeatures.split(",")
dtFeatures = list(map(str.strip, dtFeatures))
dtFeatures = ",".join([dtf for dtf in dtFeatures])
else:
dtFeatures = 'NA'
except KeyError:
dtFeatures = 'NA'
try:
if(self.basic['indexFeature']):
iFeatures = self.basic['indexFeature']
iFeatures = iFeatures.split(",")
iFeatures = list(map(str.strip, iFeatures))
iFeatures = ",".join([dif for dif in iFeatures])
else:
iFeatures = 'NA'
except KeyError:
iFeatures = 'NA'
try:
if(self.basic['trainingFeatures']):
modFeatures = self.basic['trainingFeatures']
modFeatures = modFeatures.split(",")
modFeatures = list(map(str.strip, modFeatures))
modFeatures = ",".join([modf for modf in modFeatures])
else:
modFeatures = 'NA'
except KeyError:
modFeatures = 'NA'
return(dtFeatures,iFeatures,modFeatures)
def setModels(self):
return cs.setModels(self)
def readConfigurationFile(self,path):
return cs.readConfigurationFile(self, path)
def getFilterExpression(self):
return cs.getFilterExpression(self)
def getSurvivalEventColumn(self):
return cs.getSurvivalEventColumn(self)
def getSurvivalDurationColumn(self):
return cs.getSurvivalDurationColumn(self)
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
# -*- coding: utf-8 -*-
"""
@author: satish_k
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import statistics
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from lifelines import KaplanMeierFitter, CoxPHFitter
from lifelines.statistics import logrank_test
from scipy import stats
import logging
class SurvivalAnalysis(object):
def __init__(self, df, method, event_column, duration_column, fitter_param=None, df_negate=None ):
pd.options.display.width = 30
self.df = df
self.fitter_param = fitter_param
self.method = method
self.event_column = event_column
self.duration_column = duration_column
self.models = []
self.train = df.drop_duplicates().reset_index()
self.test = None
if isinstance(df_negate, pd.DataFrame):
self.df_n = df_negate.drop_duplicates().reset_index()
else:
self.df_n = None
self.log = logging.getLogger('eion')
self.plots = []
def learn(self):
self.log.info('\\n---------- SurvivalAnalysis learner has started ----------')
self.log.info('\\n---------- SurvivalAnalysis learner method is "%s" ----------'%self.method)
lifelines_univariate_models = ["AalenJohansenFitter", "BreslowFlemingHarringtonFitter", "ExponentialFitter", "GeneralizedGammaFitter",
"KaplanMeierFitter", "LogLogisticFitter", "LogNormalFitter", "MixtureCureFitter", "NelsonAalenFitter", "PiecewiseExponentialFitter",
"SplineFitter", "WeibullFitter"]
lifelines_regression_models = ["AalenAdditiveFitter", "CRCSplineFitter", "CoxPHFitter", "CoxTimeVaryingFitter", "GeneralizedGammaRegressionFitter",
"LogLogisticAFTFitter", "LogNormalAFTFitter", "PiecewiseExponentialRegressionFitter", "WeibullAFTFitter"]
if self.method.lower() in ['kaplanmeierfitter','kaplanmeier','kaplan-meier','kaplan meier','kaplan','km','kmf']:
self.log.info('\\n---------- SurvivalAnalysis learner method "%s" has started ----------'%self.method)
#from lifelines.utils import find_best_parametric_model
#m,s = find_best_parametric_model(event_times=self.df[self.duration_column])
if not isinstance(self.df_n, pd.DataFrame):
kmf = KaplanMeierFitter()
self.log.info('\\n Shape of training data - %s'%str(self.train.shape))
T = self.train[self.duration_column]
E = self.train[self.event_column]
self.log.info('\\n T : \\n%s'%str(T))
self.log.info('\\n E : \\n%s'%str(E))
K = kmf.fit(T, E)
ax = plt.subplot(111)
kmf_sf = K.survival_function_
ax = kmf_sf.plot(ax=ax)
kmf_sf_json = self.survival_probability_to_json(kmf_sf)
self.models.append(K)
plt.title("KM Survival Functions")
self.plots.append(plt)
self.log.info('\\n---------- SurvivalAnalysis learner method "%s" has ended ----------'%self.method)
self.log.info('\\n---------- SurvivalAnalysis learner has ended ----------')
return kmf_sf_json
else:
kmf1 = KaplanMeierFitter()
kmf2 = KaplanMeierFitter()
T1 = self.train[self.duration_column]
E1 = self.train[self.event_column]
#self.df_n = self.df_n.drop('fin', axis=1)
T2 = self.df_n[self.duration_column]
E2 = self.df_n[self.event_column]
ax = plt.subplot(111)
plt.title("KM Survival Functions - Filter vs Negation")
self.log.info('\\n---------- SurvivalAnalysis learner "%s" fitting for filter expression has started----------'%self.method)
kmf1.fit(T1, E1)
ax = kmf1.plot(ax=ax, label='%s'%self.fitter_param)
self.log.info('\\n---------- SurvivalAnalysis learner "%s" fitting for filter expression has ended----------'%self.method)
self.log.info('\\n---------- SurvivalAnalysis learner "%s" fitting for negation has started----------'%self.method)
kmf2.fit(T2, E2)
ax = kmf2.plot(ax=ax, label='~%s'%self.fitter_param)
self.log.info('\\n---------- SurvivalAnalysis learner "%s" fitting for negation has ended----------'%self.method)
self.models.extend([kmf1,kmf2])
kmf1_sf = kmf1.survival_function_
kmf2_sf = kmf2.survival_function_
kmf1_sf_json = self.survival_probability_to_json(kmf1_sf)
self.plots.append(plt)
self.log.info('\\n---------- SurvivalAnalysis learner method "%s" has ended ----------'%self.method)
self.log.info('\\n---------- SurvivalAnalysis learner has ended ----------')
return kmf1_sf_json
elif self.method.lower() in ['coxphfitter','coxregression','cox-regression','cox regression','coxproportionalhazard','coxph','cox','cph']:
self.log.info('\\n---------- SurvivalAnalysis learner method "%s" has started ----------'%self.method)
#from lifelines.utils import k_fold_cross_validation
if not isinstance(self.df_n, pd.DataFrame):
cph = CoxPHFitter()
C = cph.fit(self.train, self.duration_column, self.event_column, show_progress=True)
self.models.append(C)
cph_sf = C.baseline_survival_
ax = plt.subplot(111)
ax = C.plot(ax=ax)
cph_sf_json = self.survival_probability_to_json(cph_sf)
self.log.info('\\n Summary : \\n%s'%str(C.summary))
plt.title("COX hazard ratio")
self.plots.append(plt)
self.log.info('\\n---------- SurvivalAnalysis learner method "%s" has ended ----------'%self.method)
self.log.info('\\n---------- SurvivalAnalysis learner has ended ----------')
#plt.show()
return cph_sf_json
else:
cph1 = CoxPHFitter(penalizer=0.0001)
cph2 = CoxPHFitter(penalizer=0.0001)
ax = plt.subplot(211)
plt.title("COX hazard ratio - [%s](Top) vs [~(%s)](Bottom)"%(self.fitter_param,self.fitter_param))
#self.train = self.train.drop('fin',axis=1)
self.df_n = self.drop_constant_features(self.df_n)
self.log.info('\\n---------- SurvivalAnalysis learner "%s" fitting for filter expression has started----------'%self.method)
cph1.fit(self.train, self.duration_column, self.event_column, show_progress=True, step_size=0.4)
ax = cph1.plot(ax=ax, label='%s'%self.fitter_param)
self.log.info('\\n---------- SurvivalAnalysis learner "%s" fitting for filter expression has ended----------'%self.method)
self.log.info('\\n---------- SurvivalAnalysis learner "%s" fitting for negation has started----------'%self.method)
cph2.fit(self.df_n, self.duration_column, self.event_column, show_progress=True, step_size=0.4)
ax = plt.subplot(212)
ax = cph2.plot(ax=ax, label='~%s'%self.fitter_param)
self.log.info('\\n---------- SurvivalAnalysis learner "%s" fitting for negation has ended----------'%self.method)
self.models.extend([cph1,cph2])
cph1_sf = cph1.baseline_survival_
cph2_sf = cph2.baseline_survival_
cph1_sf_json = self.survival_probability_to_json(cph1_sf)
#plt.show()
plt.tight_layout()
self.plots.append(plt)
self.log.info('\\n---------- SurvivalAnalysis learner method "%s" has ended ----------'%self.method)
self.log.info('\\n---------- SurvivalAnalysis learner has ended ----------')
return cph1_sf_json
def survival_probability_to_json(self, sf):
'''
sf = Survival function i.e. KaplanMeierFitter.survival_function_ or CoxPHFitter.baseline_survival_
returns json of survival probabilities
'''
sf = sf[sf.columns[0]].apply(lambda x: "%4.2f"%(x*100))
self.log.info('\\n Survival probabilities : \\n%s'%str(sf))
sf = sf.reset_index()
sf = sf.sort_values(sf.columns[0])
sf_json = sf.to_json(orient='records')
self.log.info('\\n Survival probability json : \\n%s'%str(sf_json))
return sf_json
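# Illustration (column names depend on the fitter): for a KaplanMeierFitter the JSON is roughly
# [{"timeline": 0.0, "KM_estimate": "100.00"}, {"timeline": 5.0, "KM_estimate": "87.50"}, ...],
# i.e. one record per time point with the survival probability formatted as a percentage string.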
def drop_constant_features(self, df):
for col in df.columns:
if len(df[col].unique()) == 1:
df.drop(col,inplace=True,axis=1)
return df
def predict(self):
if self.method == 'KaplanMeierFitter':
return self.model.predict(self.test[self.duration_column])
#kmf.predict()
#kmf.median_survival_time_
#from lifelines.utils import median_survival_times
#median_ci = median_survival_times(kmf.confidence_interval_)
elif self.method == 'CoxPHFitter':
#print('train score',self.model.score(self.train))
#print('test score',self.model.score(self.test))
return self.model.predict_survival_function(self.test)
#cph.predict_cumulative_hazard()
#cph.predict_expectation()
#cph.predict_log_partial_hazard()
#cph.predict_median()
#cph.predict_partial_hazard()
#cph.predict_percentile()
#cph.predict_survival_function()
#cph.predict_hazard()
#cph.score()
#cph.summary()
#if __name__ == "__main__":
# data_file = r"C:\\Users\\satish_k\\Desktop\\Work\\input\\echocardiogram.csv"
# #data_file = r"C:\\Users\\satish_k\\Desktop\\Work\\input\\lymphoma.csv"
# method = "CoxPHFitter"
# event_column = "alive"
# duration_column = "survival"
# sa = SurvivalAnalysis(data_file, method, event_column, duration_column)
# sa.profiler()
# model = sa.learn()
# print(sa.predict())
#print(model.survival_function_)
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import logging
logging.getLogger('tensorflow').disabled = True
import json
#from nltk.corpus import stopwords
from collections import Counter
from numpy import mean
from numpy import std
from pandas import read_csv
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from learner.machinelearning import machinelearning
# from sklearn.dummy import DummyClassifier
# create histograms of numeric input variables
import sys
import os
import re
import pandas as pd
import numpy as np
from learner.aion_matrix import aion_matrix
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import autokeras as ak
# load the sonar dataset
from sklearn.model_selection import train_test_split
# from sklearn.metrics import cohen_kappa_score
# from sklearn.metrics import roc_auc_score
# from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve
from math import sqrt
from sklearn.metrics import mean_squared_error, explained_variance_score,mean_absolute_error
from sklearn import metrics
class aionNAS:
def __init__(self,nas_class,nas_params,xtrain1,xtest1,ytrain1,ytest1,deployLocation):
try:
self.dfFeatures=None
self.nas_class=nas_class
self.nas_params=nas_params
self.targetFeature=None
self.log = logging.getLogger('eion')
self.n_models=int(self.nas_params['n_models'])
self.n_epochs=int(self.nas_params['n_epochs'])
self.optimizer=self.nas_params['optimizer']
self.metrics=self.nas_params['metrics']
self.tuner=self.nas_params['tuner']
self.seed=int(self.nas_params['seed'])
self.xtrain = xtrain1
self.xtest = xtest1
self.ytrain = ytrain1
self.ytest = ytest1
#self.labelMaps = labelMaps
self.deployLocation=deployLocation
except Exception as e:
self.log.info('<!------------- NAS INIT Error ---------------> ')
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def paramCheck(self):
try:
if not (self.nas_class):
self.log.info('<!------------- NAS class input Error ---------------> ')
if not (self.nas_params):
self.log.info('<!------------- NAS model hyperparameter input Error ---------------> ')
if not (self.targetFeature):
self.log.info('<!------------- NAS model targetFeature input Error ---------------> ')
if (self.n_models < 1):
self.n_models=1
if not (self.dfFeatures):
self.log.info('<!------------- NAS model features Error ---------------> ')
if (self.n_epochs < 1):
self.n_epochs=1 # guard against a non-positive epoch count
if not (self.optimizer):
self.optimizer="adam"
if not (self.tuner):
self.tuner="greedy"
if (self.seed < 1):
self.seed=0
if not (self.metrics):
self.metrics=None
except ValueError:
self.log.info('<------------------ NAS config file error. --------------->')
def recall_m(self,y_true, y_pred):
# element-wise sums via tf.keras.backend.sum (tf.keras.metrics.Sum is a Metric class, not a tensor op)
true_positives = tf.keras.backend.sum(tf.keras.backend.round(tf.keras.backend.clip(y_true * y_pred, 0, 1)))
possible_positives = tf.keras.backend.sum(tf.keras.backend.round(tf.keras.backend.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + tf.keras.backend.epsilon())
return recall
def precision_m(self,y_true, y_pred):
true_positives = tf.keras.backend.sum(tf.keras.backend.round(tf.keras.backend.clip(y_true * y_pred, 0, 1)))
predicted_positives = tf.keras.backend.sum(tf.keras.backend.round(tf.keras.backend.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + tf.keras.backend.epsilon())
return precision
def f1_score(self,y_true, y_pred):
precision = self.precision_m(y_true, y_pred)
recall = self.recall_m(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+tf.keras.backend.epsilon()))
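# Harmonic-mean form used above: F1 = 2 * (precision * recall) / (precision + recall),
# with the keras epsilon added to the denominator to avoid division by zero.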
def nasStructdataPreprocess(self):
df=self.data
self.paramCheck()
target=df[self.targetFeature].values
counter = Counter(target)
for k,v in counter.items():
per = v / len(target) * 100
self.log.info('autokeras struct Class=%d, Count=%d, Percentage=%.3f%%' % (k, v, per))
# select columns with numerical data types
num_ix = df.select_dtypes(include=['int64', 'float64']).columns
subset = df[num_ix]
last_ix = len(df.columns) - 1
y=df[self.targetFeature]
X = df.drop(self.targetFeature, axis=1)
#Using Pearson Correlation
# plt.figure(figsize=(12,10))
# cor = df.corr()
# sns.heatmap(cor, annot=True, cmap=plt.cm.Reds)
# plt.show()
# select categorical features
cat_ix = X.select_dtypes(include=['object', 'bool']).columns
# one hot encode cat features only
ct = ColumnTransformer([('o',OneHotEncoder(),cat_ix)], remainder='passthrough')
X = X.reset_index()
X=X.replace(to_replace="NULL",value=0)
X = X.dropna(how='any',axis=0)
X = ct.fit_transform(X)
from sklearn.preprocessing import scale
X = scale(X)
# label encode the target variable to have the classes 0 and 1
y = LabelEncoder().fit_transform(y)
# separate into train and test sets
X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=self.test_size,random_state=1)
return X_train, X_test, y_train, y_test
def nasStructClassification(self,scoreParam):
try:
objClf = aion_matrix()
X_train, X_test, y_train, y_test= self.xtrain, self.xtest, self.ytrain, self.ytest
modelName="nas_structdata_classifier"
self.log.info("Processing structured data block...\\n")
s_in = ak.StructuredDataInput()
#s_in = Flatten()(s_in)
s_out = ak.StructuredDataBlock(categorical_encoding=True)(s_in)
self.log.info("Data pipe via autokeras Classification Dense layers ...\\n")
s_out = ak.ClassificationHead()(s_out)
self.log.info("applying autokeras automodel to run different neural models...\\n")
try:
tuner = str(self.tuner).lower()
except UnicodeEncodeError:
tuner = (self.tuner.encode('utf8')).lower()
nasclf = ak.AutoModel(
inputs=s_in,
outputs=s_out,
overwrite=True,
tuner=tuner,
max_trials=self.n_models,
seed=self.seed)
# compile the model
#nasclf.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc',self.f1_score,self.precision_m, self.recall_m])
nasclf.fit(X_train, y_train, epochs=self.n_epochs)
best_model = nasclf.export_model()
mpredict=best_model.predict(X_test)
mtpredict=best_model.predict(X_train)
#loss, accuracy, f1_score, precision, recall = nasclf.evaluate(X_test, y_test, verbose=0)
#from sklearn.metrics import classification_report
#Classification report
y_pred_bool = np.argmax(mpredict, axis=1)
y_train_pred_bool = np.argmax(mtpredict, axis=1)
score = objClf.get_score(scoreParam,y_test, y_pred_bool)
#best_model = nasclf.export_model()
best_model_summary=best_model.summary()
filename = os.path.join(self.deployLocation,'log','summary.txt')
with open(filename,'w') as f:
best_model.summary(print_fn=lambda x: f.write(x + '\\n'))
f.close()
#self.log.info("==========")
#self.log.info(best_model_summary)
self.log.info("NAS struct data classification, best model summary: \\n"+str(best_model.summary(print_fn=self.log.info)))
#self.log.info("==========")
#Save and load model
# # #try:
# try:
# best_model.save("model_class_autokeras", save_format="tf")
# except Exception:
# best_model.save("model_class_autokeras.h5")
# loaded_model = load_model("model_class_autokeras", custom_objects=ak.CUSTOM_OBJECTS)
# loadedmodel_predict=loaded_model.predict(X_test)
loss,accuracy_m=nasclf.evaluate(X_test, y_test)
#mpredict_classes = mpredict.argmax(axis=-1)
#accuracy = accuracy_score(y_test.astype(int), mpredict.astype(int))
# precision tp / (tp + fp)
#precision = precision_score(y_test.astype(int), mpredict.astype(int),average='macro')
# recall: tp / (tp + fn)
#recall = recall_score(y_test.astype(int), mpredict.astype(int),average='macro')
#f1score=f1_score(y_test.astype(int), mpredict.astype(int) , average="macro")
self.log.info("Autokeras struct data classification metrics: \\n")
except Exception as inst:
self.log.info("Error: NAS failed "+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
print(inst)
return modelName,nasclf,score
def nasStructRegressor(self,scoreParam):
objClf = aion_matrix()
modelName="nas_struct_regressor"
#self.paramCheck()
X_train, X_test, y_train, y_test= self.xtrain, self.xtest, self.ytrain, self.ytest
# Autokeras alg
s_in = ak.StructuredDataInput()
#tf.keras.layers.GlobalMaxPooling2D()(s_in)
s_out = ak.StructuredDataBlock(categorical_encoding=True)(s_in)
self.log.info("Data pipe via autokeras Regression Dense layers ...\\n")
s_out = ak.RegressionHead(loss='mse', metrics=['mae'])(s_out)
self.log.info("applying autokeras automodel to evaluate different neural models...\\n")
try:
tuner = str(self.tuner).lower()
except UnicodeEncodeError:
tuner = (self.tuner.encode('utf8')).lower()
nas_reg = ak.AutoModel(
inputs=s_in,
outputs=s_out,
overwrite=True,
tuner=tuner,
max_trials=self.n_models)
nas_reg.fit(X_train, y_train, epochs=self.n_epochs)
best_model = nas_reg.export_model()
self.log.info("NAS struct data regression best model summary: \\n")
best_model_summary=best_model.summary(print_fn=self.log.info)
self.log.info(best_model_summary)
predictm=best_model.predict(X_test)
        mtpredict=best_model.predict(X_train)
score = objClf.get_score(scoreParam,y_test, predictm)
self.log.info("Autokeras struct data regression metrics: \\n")
return modelName,nas_reg,score
def nasMain(self,scoreParam):
modelName = ""
nasclf=None
nas_reg=None
#text_reg_model=None
mse_value=0
reg_rmse=0
mape_reg=0
huber_loss_reg=0
accuracy=0
precision=0
recall=0
#Dummy values to return main for classification problems
dummy_score_1=int(0)
#dummy_score_2=int(0)
try:
if ((self.nas_class.lower() == "classification")):
modelName,nasclf,score=self.nasStructClassification(scoreParam)
self.log.info('NAS Struct Classification score: '+str(score))
best_model_nas = nasclf.export_model()
scoredetails = '{"Model":"NAS","Score":'+str(round(score,2))+'}'
return best_model_nas,self.nas_params,round(score,2),'NAS',-1,-1,-1
elif (self.nas_class.lower() == "regression"):
modelName,nas_reg,score =self.nasStructRegressor(scoreParam)
self.log.info('NAS Struct Regression score: '+str(score))
best_model_nas = nas_reg.export_model()
'''
filename = os.path.join(self.deployLocation,'model','autoKerasModel')
best_model_nas = nas_reg.export_model()
try:
best_model_nas.save(filename, save_format="tf")
modelName = 'autoKerasModel'
except Exception:
filename = os.path.join(self.deployLocation,'model','autoKerasModel.h5')
best_model_nas.save(filename)
modelName = 'autoKerasModel.h5'
'''
scoredetails = '{"Model":"NAS","Score":'+str(round(score,2))+'}'
'''
error_matrix = '"MSE":"'+str(round(mse_value,2))+'","RMSE":"'+str(round(reg_rmse,2))+'","MAPE":"'+str(round(mape_reg,2))+'","MSLE":"'+str(round(msle_reg,2))+'"'
'''
return best_model_nas,self.nas_params,score,'NAS'
else:
pass
except Exception as inst:
print(inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
output = {"status":"FAIL","message":str(inst).strip('"')}
output = json.dumps(output)
<s> import itertools
import logging
from typing import Optional, Dict, Union
from nltk import sent_tokenize
import torch
from transformers import(
AutoModelForSeq2SeqLM,
AutoTokenizer,
PreTrainedModel,
PreTrainedTokenizer,
)
logger = logging.getLogger(__name__)
class QGPipeline:
"""Poor man's QG pipeline"""
def __init__(
self,
model: PreTrainedModel,
tokenizer: PreTrainedTokenizer,
ans_model: PreTrainedModel,
ans_tokenizer: PreTrainedTokenizer,
qg_format: str,
use_cuda: bool
):
self.model = model
self.tokenizer = tokenizer
self.ans_model = ans_model
self.ans_tokenizer = ans_tokenizer
self.qg_format = qg_format
self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
self.model.to(self.device)
if self.ans_model is not self.model:
self.ans_model.to(self.device)
assert self.model.__class__.__name__ in ["T5ForConditionalGeneration", "BartForConditionalGeneration"]
if "T5ForConditionalGeneration" in self.model.__class__.__name__:
self.model_type = "t5"
else:
self.model_type = "bart"
def __call__(self, inputs: str):
inputs = " ".join(inputs.split())
sents, answers = self._extract_answers(inputs)
flat_answers = list(itertools.chain(*answers))
if len(flat_answers) == 0:
return []
if self.qg_format == "prepend":
qg_examples = self._prepare_inputs_for_qg_from_answers_prepend(inputs, answers)
else:
qg_examples = self._prepare_inputs_for_qg_from_answers_hl(sents, answers)
qg_inputs = [example['source_text'] for example in qg_examples]
questions = self._generate_questions(qg_inputs)
output = [{'answer': example['answer'], 'question': que} for example, que in zip(qg_examples, questions)]
return output
def _generate_questions(self, inputs):
inputs = self._tokenize(inputs, padding=True, truncation=True)
outs = self.model.generate(
input_ids=inputs['input_ids'].to(self.device),
attention_mask=inputs['attention_mask'].to(self.device),
max_length=32,
num_beams=4,
)
questions = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in outs]
return questions
def _extract_answers(self, context):
sents, inputs = self._prepare_inputs_for_ans_extraction(context)
inputs = self._tokenize(inputs, padding=True, truncation=True)
outs = self.ans_model.generate(
input_ids=inputs['input_ids'].to(self.device),
attention_mask=inputs['attention_mask'].to(self.device),
max_length=32,
)
dec = [self.ans_tokenizer.decode(ids, skip_special_tokens=False) for ids in outs]
answers = [item.split('<sep>') for item in dec]
answers = [i[:-1] for i in answers]
return sents, answers
def _tokenize(self,
inputs,
padding=True,
truncation=True,
add_special_tokens=True,
max_length=512
):
inputs = self.tokenizer.batch_encode_plus(
inputs,
max_length=max_length,
add_special_tokens=add_special_tokens,
truncation=truncation,
padding="max_length" if padding else False,
pad_to_max_length=padding,
return_tensors="pt"
)
return inputs
def _prepare_inputs_for_ans_extraction(self, text):
sents = sent_tokenize(text)
inputs = []
for i in range(len(sents)):
source_text = "extract answers:"
for j, sent in enumerate(sents):
if i == j:
sent = "<hl> %s <hl>" % sent
source_text = "%s %s" % (source_text, sent)
source_text = source_text.strip()
if self.model_type == "t5":
source_text = source_text + " </s> "
inputs.append(source_text)
return sents, inputs
def _prepare_inputs_for_qg_from_answers_hl(self, sents, answers):
inputs = []
for i, answer in enumerate(answers):
if len(answer) == 0: continue
for answer_text in answer:
sent = sents[i]
sents_copy = sents[:]
answer_text = answer_text.strip()
ans_start_idx = 0
# ans_start_idx = sent.index(answer_text)
# if answer_text in sent:
# ans_start_idx = sent.index(answer_text)
# else:
# continue
sent = f"{sent[:ans_start_idx]} <hl> {answer_text} <hl> {sent[ans_start_idx + len(answer_text): ]}"
sents_copy[i] = sent
source_text = " ".join(sents_copy)
source_text = f"generate question: {source_text}"
if self.model_type == "t5":
source_text = source_text + " </s> "
inputs.append({"answer": answer_text, "source_text": source_text})
return inputs
def _prepare_inputs_for_qg_from_answers_prepend(self, context, answers):
flat_answers = list(itertools.chain(*answers))
examples = []
for answer in flat_answers:
source_text = f"answer: {answer} context: {context}"
if self.model_type == "t5":
source_text = source_text + " </s> "
examples.append({"answer": answer, "source_text": source_text})
return examples
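# Illustrative note on the two source-text formats built above (example values are
# hypothetical):
#   highlight format: the answer span in its sentence is wrapped in <hl> tokens, e.g.
#       "generate question: <hl> Guido van Rossum <hl> created Python. </s>"
#   prepend format:   the answer is prefixed to the full context, e.g.
#       "answer: Guido van Rossum context: Guido van Rossum created Python. </s>"
# The trailing " </s> " is only appended for T5-style models. With ans_start_idx
# hard-coded to 0, the highlight variant is only exact when the answer sits at the start
# of its sentence (the index lookup is commented out above).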
class MultiTaskQAQGPipeline(QGPipeline):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def __call__(self, inputs: Union[Dict, str]):
if type(inputs) is str:
# do qg
return super().__call__(inputs)
else:
# do qa
return self._extract_answer(inputs["question"], inputs["context"])
def _prepare_inputs_for_qa(self, question, context):
source_text = f"question: {question} context: {context}"
if self.model_type == "t5":
source_text = source_text + " </s> "
return source_text
def _extract_answer(self, question, context):
source_text = self._prepare_inputs_for_qa(question, context)
inputs = self._tokenize([source_text], padding=False)
outs = self.model.generate(
input_ids=inputs['input_ids'].to(self.device),
attention_mask=inputs['attention_mask'].to(self.device),
max_length=16,
)
answer = self.tokenizer.decode(outs[0], skip_special_tokens=True)
return answer
class E2EQGPipeline:
def __init__(
self,
model: PreTrainedModel,
tokenizer: PreTrainedTokenizer,
use_cuda: bool
) :
self.model = model
self.tokenizer = tokenizer
self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
self.model.to(self.device)
assert self.model.__class__.__name__ in ["T5ForConditionalGeneration", "BartForConditionalGeneration"]
if "T5ForConditionalGeneration" in self.model.__class__.__name__:
self.model_type = "t5"
else:
self.model_type = "bart"
self.default_generate_kwargs = {
"max_length": 256,
"num_beams": 4,
"length_penalty": 1.5,
"no_repeat_ngram_size": 3,
"early_stopping": True,
}
def __call__(self, context: str, **generate_kwargs):
inputs = self._prepare_inputs_for_e2e_qg(context)
        # TODO: when overriding default_generate_kwargs all other arguments need to be passed
# find a better way to do this
if not generate_kwargs:
generate_kwargs = self.default_generate_kwargs
input_length = inputs["input_ids"].shape[-1]
# max_length = generate_kwargs.get("max_length", 256)
# if input_length < max_length:
# logger.warning(
# "Your max_length is set to {}, but you input_length is only {}. You might consider decreasing max_length manually, e.g. summarizer('...', max_length=50)".format(
# max_length, input_length
# )
# )
outs = self.model.generate(
input_ids=inputs['input_ids'].to(self.device),
attention_mask=inputs['attention_mask'].to(self.device),
**generate_kwargs
)
prediction = self.tokenizer.decode(outs[0], skip_special_tokens=True)
questions = prediction.split("<sep>")
questions = [question.strip() for question in questions[:-1]]
return questions
def _prepare_inputs_for_e2e_qg(self, context):
source_text = f"generate questions: {context}"
if self.model_type == "t5":
source_text = source_text + " </s> "
inputs = self._tokenize([source_text], padding=False)
return inputs
def _tokenize(
self,
inputs,
padding=True,
truncation=True,
add_special_tokens=True,
max_length=512
):
inputs = self.tokenizer.batch_encode_plus(
inputs,
max_length=max_length,
add_special_tokens=add_special_tokens,
truncation=truncation,
padding="max_length" if padding else False,
pad_to_max_length=padding,
return_tensors="pt"
)
return inputs
SUPPORTED_TASKS = {
"question-generation": {
"impl": QGPipeline,
"default": {
"model": "valhalla/t5-small-qg-hl",
"ans_model": "valhalla/t5-small-qa-qg-hl",
}
},
"multitask-qa-qg": {
"impl": MultiTaskQAQGPipeline,
"default": {
"model": "valhalla/t5-small-qa-qg-hl",
}
},
"e2e-qg": {
"impl": E2EQGPipeline,
"default": {
"model": "valhalla/t5-small-e2e-qg",
}
}
}
def pipeline(
task: str,
model: Optional = None,
tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
qg_format: Optional[str] = "highlight",
ans_model: Optional = None,
ans_tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
use_cuda: Optional[bool] = True,
**kwargs,
):
# Retrieve the task
    if task not in SUPPORTED_TASKS:
raise KeyError("Unknown task {}, available tasks are {}".format(task, list(SUPPORTED_TASKS.keys())))
targeted_task = SUPPORTED_TASKS[task]
task_class = targeted_task["impl"]
# Use default model/config/tokenizer for the task if no model is provided
if model is None:
model = targeted_task["default"]["model"]
# Try to infer tokenizer from model or config name (if provided as str)
if tokenizer is None:
if isinstance(model, str):
tokenizer = model
else:
            # Impossible to guess which tokenizer to use here
raise Exception(
"Impossible to guess which tokenizer to use. "
"Please provided a PretrainedTokenizer class or a path/identifier to a pretrained tokenizer."
)
# Instantiate tokenizer if needed
if isinstance(tokenizer, (str, tuple)):
if isinstance(tokenizer, tuple):
# For tuple we have (tokenizer name, {kwargs})
tokenizer = AutoTokenizer.from_pretrained(tokenizer[0], **tokenizer[1])
else:
tokenizer = AutoTokenizer.from_pretrained(tokenizer)
# Instantiate model if needed
if isinstance(model, str):
model = AutoModelForSeq2SeqLM.from_pretrained(model)
if task == "question-generation":
if ans_model is None:
# load default ans model
ans_model = targeted_task["default"]["ans_model"]
ans_tokenizer = AutoTokenizer.from_pretrained(ans_model)
ans_model = AutoModelForSeq2SeqLM.from_pretrained(ans_model)
else:
# Try to infer tokenizer from model or config name (if provided as str)
if ans_tokenizer is None:
if isinstance(ans_model, str):
ans_tokenizer = ans_model
else:
                # Impossible to guess which tokenizer to use here
raise Exception(
"Impossible to guess which tokenizer to use. "
"Please provided a PretrainedTokenizer class or a path/identifier to a pretrained tokenizer."
)
# Instantiate tokenizer if needed
if isinstance(ans_tokenizer, (str, tuple)):
if isinstance(ans_tokenizer, tuple):
# For tuple we have (tokenizer name, {kwargs})
ans_tokenizer = AutoTokenizer.from_pretrained(ans_tokenizer[0], **ans_tokenizer[1])
else:
ans_tokenizer = AutoTokenizer.from_pretrained(ans_tokenizer)
if isinstance(ans_model, str):
ans_model = AutoModelForSeq2SeqLM.from_pretrained(ans_model)
if task == "e2e-qg":
return task_class(model=model, tokenizer=tokenizer, use_cuda=use_cuda)
elif task == "question-generation":
return task_class(model=model, tokenizer=tokenizer, ans_model=ans_model, ans_tokenizer=ans_tokenizer, qg_format=qg_format, use_cuda=use_cuda)
else:
return task_class(model=model, tokenizer=tokenizer, ans_model=model, ans_tokenizer=tokenizer, qg_format=qg_format, use_cuda=use_cuda)
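# Minimal usage sketch for the factory above (not executed here; downloading the default
# valhalla/t5-small-* checkpoints requires network access, and the shown output is only
# illustrative):
#
#   nlp = pipeline("question-generation")
#   nlp("Python is a programming language created by Guido van Rossum.")
#   # e.g. [{'answer': 'Guido van Rossum', 'question': 'Who created Python?'}]
#
#   qa_qg = pipeline("multitask-qa-qg")
#   qa_qg({"question": "Who created Python?",
#          "context": "Python is a programming language created by Guido van Rossum."})
#
#   e2e = pipeline("e2e-qg")
#   e2e("Python is a programming language created by Guido van Rossum.")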
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import socket
import os
import rsa
from os.path import expanduser
from pathlib import Path
import requests
import platform
from appbe.dataPath import DATA_DIR
import socket
import getmac
import subprocess
import sys
import json
from datetime import datetime
import binascii
computername = socket.getfqdn()
global_key = '''
-----BEGIN RSA PUBLIC KEY-----
MIIBCgKCAQEAzJcxqRiUpp7CzViyqNlYaeyceDh5y6Ib4SoxoyNkN3+k0q+cr1lb
k0KdWTtHIVqH1wsLYofYjpB7X2RN0KYTv8VfwmfQNrpFEbiRz4gcAeuxGCPgGaue
N1ttujQMWHWCcY+UH5Voh8YUfkW8P+T3zxvr1d30D+kVBJC59y/31JvTzr3Bw/T+
NYv6xiienYiEYtm9d5ATioEwZOXaQBrtVvRmqcod5A1h4kn1ZauLX2Ph8H4TAuit
NLtw6xUCJNumphP7xdU+ca6P6a6eaLprgKhvky+nz16u9/AC2AazRQHKWf8orS6b
fw16JDCRs0zU4mTQLCjkUUt0edOaRhUtcQIDAQAB
-----END RSA PUBLIC KEY-----
'''
quarter_key = '''
-----BEGIN RSA PUBLIC KEY-----
MIIBCgKCAQEAmKzOJxVEV9ulA+cjfxguAduLMD47OWjLcEAEmEuK8vR4O5f6e2h1
08NniGC+nkwqmM00U7JTVBkqnt9S/JgE3pAH2xwfWda2OvXNWisWmOQdqB0+XRHh
NXsIG3yRk/sMlDpe7MJIyM5ADSu01PLn9FZTfmMq7lEp32tAf71cuUE/dwuWSvEQ
WK2hn1L4D97O43XCd7FHtMSHfgtjdcCFgX9IRgWLKC8Bm3q5qcqF4v3cHuYTj3V9
njxPtRqPg6HJFiJrm9AX5bUEHAvbTcw4wAmsNTRQHPvVB+Lc+yGh5x8crhKjNB01
gdB5I3a4mPO7dKvadR6Mr28trr0Ff5t2HQIDAQAB
-----END RSA PUBLIC KEY-----
'''
halfYear_key='''
-----BEGIN RSA PUBLIC KEY-----
MIIBCgKCAQEAgrGNwl8CNYQmVxi8/GEgPjfL5aEmyPkDyaJb9h4hZDSZCeeKd7Rv
wwhuRTdBBfOp0bQ7QS7NYMg38Xlc3x85I9RnxdQdDKn2nRuvG0hG3wMBFy/DCSXF
tXbDjJkLijAhqcBNu8m+a2Gtn14ShC7TbcfY4iVXho3WFUrn0xq6S5ducqWCsLJh
R+TNImCaMICqfoAzEDGC3ojO5Hi3vJmmyK5CVp6bt4wLRATQjcp1ujGW4Uv4kEgp
7TR077c226v1KOdKdyZPHJzT1MKwZrG2Gdluk3/Y1apbwyGzYqFdTCOAB+mE73Dn
wFXURgDJQmaU2oxxaA13WRcELpnirm+aIwIDAQAB
-----END RSA PUBLIC KEY-----
'''
oneYear_key='''
-----BEGIN RSA PUBLIC KEY-----
MIIBCgKCAQEA3GLqn+vkKn3fTNH3Bbb3Lq60pCoe+mn0KPz74Bp7p5OkZAUe14pP
Tcf/UqdPwiENhSCseWtfZmfKDK8qYRHJ5xW02+AhHPPdiacS45X504/lGG3q/4SG
ZgaFhMDvX+IH/ZH+qqbU3dRQhXJCCrAVAa7MonzM6yPiVeS2SdpMkNg1VDR1oTLB
Pn+qSV6CnkK1cYtWCRQ23GH2Ru7fc09r7m8hVcifKJze84orpHC5FX0WScQuR8h/
fs1IbGkxTOxP8vplUj/cd4JjUxgd+w+8R4kcoPhdGZF5UGeZA8xMERzQLvh+4Ui0
KIvz5/iyKB/ozaeSG0OMwDAk3WDEnb1WqQIDAQAB
-----END RSA PUBLIC KEY-----
'''
full_key='''
-----BEGIN RSA PUBLIC KEY-----
MIIBCgKCAQEAhqfNMuYYLdVrePhkO9rU/qT6FgolzI0YyzIJ2OeJE+++JioYm6nn
ohQU32iiE0DZlCCLrHJXOOIAz2Op80goX0lxtngyxVUPsiB5CI77sAC7x6K3anJ0
elpnQCC0+xV2ZL5eIMNQHLe+X6wJl/HGWqkUlxKpWr4/kBEB4EisW60OePfhntIN
4OUJ7iEq+sDdOM5WazJIXeNV1cig4i6057GE3k5ITcQUmw17DZu2+dqkIscckaG+
t5SF7Qnvt4IY8IeQp2htx3yD+CJCV0u2uKwoSFMGJn3OWdaixC3+eojyMXmfAWtQ
Ee9NLNNaTCMIvQ8BeItJLQs2Htw3bZNMvwIDAQAB
-----END RSA PUBLIC KEY-----
'''
def validate_key_Pair(privatepath,publickey):
with open(privatepath, 'rb') as privatefile:
keydata = privatefile.read()
privatefile.close()
try:
privkey = rsa.PrivateKey.load_pkcs1(keydata,'PEM')
data = 'Validate Global License'
signature = rsa.sign(data.encode('utf-8'), privkey, 'SHA-1')
pubkey = rsa.PublicKey.load_pkcs1(publickey)
except:
return False
try:
rsa.verify(data.encode('utf-8'), signature, pubkey)
return True
except Exception as e:
return False
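# Sketch of how a matching key pair for validate_key_Pair could be produced with the
# same `rsa` package (for local testing only; the file name is hypothetical):
#
#   pub, priv = rsa.newkeys(2048)
#   Path('test.lic').write_bytes(priv.save_pkcs1('PEM'))
#   validate_key_Pair('test.lic', pub.save_pkcs1('PEM'))   # -> True
#
# Verification returns False whenever the private key stored in the license file does
# not correspond to the public key constant being checked.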
def updateDRecord(licensepath):
domain_license_path = os.path.join(DATA_DIR,'License','license_domain.lic')
if(os.path.isfile(licensepath)):
with open(licensepath, 'rb') as f:
licensekey = f.read()
f.close()
with open(domain_license_path, 'wb') as f:
f.write(licensekey)
f.close()
if(validate_key_Pair(domain_license_path,global_key)):
return True,'Valid Domain License'
else:
return False,'Invalid Domain License'
else:
return False,'File Not Exists'
def generateLicenseKey(userKey):
record = {'UserKey':userKey}
record = json.dumps(record)
status = 'Error'
url = 'https://qw7e33htlk.execute-api.ap-south-1.amazonaws.com/default/aion_license'
try:
response = requests.post(url, data=record,headers={"x-api-key":"3cQKRkKA4S57pYrkFp1Dd9jRXt4xnFoB9iqhAQRM","Content-Type":"application/json",})
if response.status_code == 200:
outputStr=response.content
outputStr = outputStr.decode('utf-8','ignore')
outputStr = outputStr.strip()
license_dict = json.loads(str(outputStr))
if license_dict['status'] == 'success':
status = 'Success'
licenseKey = license_dict['msg']
else:
status = 'Error'
licenseKey = ''
else:
status = 'Error'
licenseKey = ''
except Exception as inst:
print(inst)
status = 'Error'
licenseKey = ''
msg = {'status':status,'key':userKey,'licenseKey':licenseKey,'link':''}
return msg
def updateRecord(licensepath):
currentDirectory = os.path.dirname(os.path.abspath(__file__))
license_path = os.path.join(currentDirectory,'..','lic','license.lic')
if(os.path.isfile(licensepath)):
with open(licensepath, 'rb') as f:
licensekey = f.read()
f.close()
with open(license_path, 'wb') as f:
f.write(licensekey)
f.close()
status,msg = check_domain_license()
if status:
status,msg = getdaysfromstartdate()
if status:
status,msg = check_days_license(int(msg))
return status,msg
else:
return False,'File Not Exists'
def check_domain_license():
if 'CORP.HCL.IN' in computername:
return True,'HCL Domain'
else:
return True,'HCL Domain'
def diff_month(d1, d2):
return (d1.year - d2.year) * 12 + d1.month - d2.month
def getdaysfromstartdate():
currentDirectory = os.path.dirname(os.path.abspath(__file__))
startdatePath = os.path.join(currentDirectory,'..','lic','startdate.txt')
if(os.path.isfile(startdatePath)):
with open(startdatePath, "rb") as fl:
encrypted_message = fl.read()
fl.close()
privkey = '''-----BEGIN RSA PRIVATE KEY-----
MIIEqwIBAAKCAQEAm75ZwaepuxGJjU1Slk1+IUO2E49Hy8i9dym5FUaBRyTRH6R+
GTF1kcpd+1QinIZDMIdsmAc95Y8pTufxY30QxCkOhVASitSQWHS/IiWQHmsTJwdr
38lqZnQQloOt/iPlhcavbxu/yKFzwBmp+nM+ErDTnCBh6EGCGrw1xWF30T2IBpmp
WwMEoqZsFV69RzwQAw39KG1KCxi5uscrB62YPgUdlT2b4Yaa90egQhGLLVdnKvhP
ORiGT9omCH90Dkm1oMMQ0Y2JBLezgXa/bunSqtTBxEwzlwUAX2JJcanFYrzKy2OL
xzwNRlWUXilZ4R/1RHAgUdNyKbYxZqc24MApoQIDAQABAoIBAQCHZ/i7gNz10qqH
2qkqGlfF7gvYd6MRTwdDGlhbYgA17ZGP9EDaAIFabtpFEAJDmgvCnotQpkMvWcet
XcUmHW89TQDd8R8d6u9QqLggpQ3nFGsDbNViLMjAKLrfUb8tjOIZ7ANNE5ArjAuK
AgYhxJ48O9bPD+xvtLwip95PHxMMz1CF0vxrpCinvPdeC3HzcnLNZWN3ustbph/4
Tx8mrKDpAVIHVYVbY4CMtm7NbIBYdyR9Lokc4zBg/OTuLo+0QRVJ3GHAN6cGxTwY
vLwN9iBBHyn9WBp5NIOSoCdob7+ce8y+X8yHmVhwRCfcrYphzfFNfP7SPNzV1dLs
dFybn/h9AoGJALCOC7ss+PBXy5WrWVNRPzFO7KrJDl5q7s/gMk0PkB4i4XOKHDTl
MhHZXhxp84HwpphwNxPHvpFe3pVZwwoe8LH1neoodlLOF0Kuk3jENh6cMhKFvcZ+
gxaBxGSCOXF/U307mh0i4AafClhVjxtLgBW5iJSVA9Brc7ZqVwxlUP7aYGzReIE1
uEMCeQDh0vq8NteUlkM/wpNzrHHqgtEzePbTYa+QcTm4xhARHR/cO+E0/mZIfltw
3NVWCIalMia+aKnvRHqHy/cQfEo2Uv/h8oARWnbrvicMRTwYL0w2GrP0f+aG0RqQ
msLMzS3kp6szhM7C99reFxdlxJoWBKkp94psOksCgYkApB01zGRudkK17EcdvjPc
sMHzfoFryNpPaI23VChuR4UW2mZ797NAypSqRXE7OALxaOuOVuWqP8jW0C9i/Cps
hI+SnZHFAw2tU3+hd3Wz9NouNUd6c2MwCSDQ5LikGttHSTa49/JuGdmGLTxCzRVu
V0NiMPMfW4I2Sk8o4U3gbzWgwiYohLrhrwJ5ANun/7IB2lIykvk7B3g1nZzRYDIk
EFpuI3ppWA8NwOUUoj/zksycQ9tx5Pn0JCMKKgYXsS322ozc3B6o3AoSC5GpzDH4
UnAOwavvC0ZZNeoEX6ok8TP7EL3EOYW8s4zIa0KFgPac0Q0+T4tFhMG9qW+PWwhy
Oxeo3wKBiCQ8LEgmHnXZv3UZvwcikj6oCrPy8fnhp5RZl2DPPlaqf3vokE6W5oEo
LIKcWKvth3EU7HRKwYgaznj/Mw55aETx31R0FiXMG266B4V7QWPF/KuaR0GBsYfu
+edGXQCnLgooKlMtQLdL5mcLXHc9x/0Z0iYEejJtbjcGR87WylSNaCH3hH703iQ=
-----END RSA PRIVATE KEY-----
'''
privkey = rsa.PrivateKey.load_pkcs1(privkey,'PEM')
decrypted_message = rsa.decrypt(encrypted_message, privkey)
decrypted_message = decrypted_message.decode()
import datetime
start_time = datetime.datetime.strptime(decrypted_message, '%Y-%m-%d')
current_date = datetime.datetime.today().strftime('%Y-%m-%d')
current_date = datetime.datetime.strptime(current_date, '%Y-%m-%d')
Months = diff_month(current_date,start_time)
return True,Months
else:
return False,'Start Date Not Exists'
def check_days_license(months):
currentDirectory = os.path.dirname(os.path.abspath(__file__))
license_path = os.path.join(currentDirectory,'..','lic','license.lic')
if(os.path.isfile(license_path)):
if(validate_key_Pair(license_path,full_key)):
return True,'Valid License'
elif(validate_key_Pair(license_path,oneYear_key)):
if months <= 12:
return True,'Valid License'
else:
return False,'License for AI.ON has expired. Please contact ERS Research for renewal.'
elif(validate_key_Pair(license_path,halfYear_key)):
if months <= 6:
return True,'Valid License'
else:
return False,'License for AI.ON has expired. Please contact ERS Research for renewal.'
elif(validate_key_Pair(license_path,quarter_key)):
if months <= 3:
return True,'Valid License'
else:
return False,'License for AI.ON has expired. Please contact ERS Research for renewal.'
else:
return False,'Invalid License'
else:
return False,'License Not exists.Please contact ERS Research for renewal.'
def checklicense():
import binascii
license_path = os.path.join(DATA_DIR,'License','license.lic')
if(os.path.isfile(license_path)):
try:
with open(license_path, 'r') as privatefile:
license_key = privatefile.read()
privatefile.close()
encrypted_message = binascii.unhexlify(license_key.encode())
privkey = '''-----BEGIN RSA PRIVATE KEY-----
MIIEqQIBAAKCAQEAhqfNMuYYLdVrePhkO9rU/qT6FgolzI0YyzIJ2OeJE+++JioY
m6nnohQU32iiE0DZlCCLrHJXOOIAz2Op80goX0lxtngyxVUPsiB5CI77sAC7x6K3
anJ0elpnQCC0+xV2ZL5eIMNQHLe+X6wJl/HGWqkUlxKpWr4/kBEB4EisW60OePfh
ntIN4OUJ7iEq+sDdOM5WazJIXeNV1cig4i6057GE3k5ITcQUmw17DZu2+dqkIscc
kaG+t5SF7Qnvt4IY8IeQp2htx3yD+CJCV0u2uKwoSFMGJn3OWdaixC3+eojyMXmf
AWtQEe9NLNNaTCMIvQ8BeItJLQs2Htw3bZNMvwIDAQABAoIBAGGmuRnrYaeDeWAO
CmqZxRMyQybOjyDrRgq9rAR/zJoHp8b3ikcBDTkuBQELWVZLFj7k50XU2cono9zC
cxI5xwVrNqrUOkV+7VYJVJzPTFkT/xnEt+zbOfstKmmIDpdzthtTLuHlomhhHA83
rPFi5a0Dpynz35suEnm6ONxx4ICONa3xkQ51ALm8EEsdJ+qRQhi2HLTF/OVZMxSa
A2DlFd4ChOEbYaN63xVCDxPXe9BfeHd/Rnim9x4xL9i2RL+mhARUy/ZP6LMHIPk7
NxTrGr4TuE/ETg8FZ3cywSnwsMlcplXo8Ar+5ths2XKxbmH1TI/vuQV1r7r0IeqV
F4W/xOkCgYkAiDQy7/WyJWuT+rQ+gOjSUumXgWE3HO+vJAsy05cTZFSs+nUE4ctn
FnvbBIRuClSr3zhcTtjaEaVnZ2OmGfOoAq0cvaXSlxqEs2456WQBf9oPHnvJEV07
AIqzo2EuDvGUh/bkFN3+djRRL9usNNplYA8jU3OQHGdeaS15ZikT+ZkQLXoHE0Oh
vQJ5AP0W9Qouvc9jXRhjNNOWmgt+JiHw/oQts/LUWJ2T4UJ7wKAqGwsmgf0NbF2p
aZ6AbMc7dHzCb52iLJRxlmlkJYzg449t0MgQVxTKQ5viIAdjkRBCIY2++GcYXb6k
6tUnF0Vm2kpffYUb5Lx5JoUE6IhMP0mEv3jKKwKBiCmvoC9lCUL+q+m9JKwbldOe
fqowcMfAa+AiNUohIORCLjbxfa8Fq+VrvtqhFXS/+WJ2Q3o2UHe6Ie24x+uFcVRw
Wy2IBO4ORbMM91iBLRxORvZTeHSCDj7aNKS6Z3hXY9hBLglc8DaJSJfXKdt7RC+k
MnGmGuM2l+Sk8FTeGaj4ucTRZjz1JBkCeQDhNSV1GyShv4xeoCCoy1FmOqmZ+EWy
vqxqv1PfXHDM5SwCGZWY9XokAGbWbWLjvOmO27QLNEV34pCCwxSR0aCsXI2B2rk2
3Xtvr5A7zRqtGIdEDWSoKjAGJSN9+mhQpglKI3zJQ3GBGdIPeEqzgSud5SNHu01a
IaMCgYgyoxtqdWi90iE75/x+uIVGJRdHtWoL2dr8Ixu1bOMjKCR8gjneSRTqI1tA
lbRH5K/jg6iccB/pQmBcIPIubF10Nv/ZQV760WK/h6ue2hOCaBLWT8EQEEfBfnp+
9rfBfNQIQIkBFTfGIHXUUPb9sJgDP1boUxcqxr9bpKUrs1EMkUd+PrvpHIj2
-----END RSA PRIVATE KEY-----
'''
privkey = rsa.PrivateKey.load_pkcs1(privkey,'PEM')
decrypted_message = rsa.decrypt(encrypted_message, privkey)
msg = decrypted_message.decode().split('####')
product = msg[0]
computernameLicense = msg[1]
computername = socket.getfqdn()
licenseValid = False
if product.lower() == 'aion':
if computernameLicense == computername:
uuidlicense = msg[3]
uuid = guid()
if uuidlicense == uuid:
current_date = datetime.now()
license_expiry_date = msg[5]
license_expiry_date = datetime.strptime(license_expiry_date,'%Y-%m-%d %H:%M:%S')
if current_date > license_expiry_date:
return False,'License Expire'
else:
return True,''
return False,'License Error'
except Exception as e:
print(e)
return False,'License Error'
else:
return False,'Generate License'
def generate_record_key(product,version):
computername = socket.getfqdn()
macaddress = getmac.get_mac_address()
license_date = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
try:
user = os.getlogin()
except:
user = 'NA'
uuid = guid()
msg = product+'###'+version+'###'+computername+'###'+macaddress+'###'+user+'###'+sys.platform+'###'+uuid+'###'+license_date
pkeydata='''-----BEGIN RSA PUBLIC KEY-----
MIIBCgKCAQEAm75ZwaepuxGJjU1Slk1+IUO2E49Hy8i9dym5FUaBRyTRH6R+GTF1
kcpd+1QinIZDMIdsmAc95Y8pTufxY30QxCkOhVASitSQWHS/IiWQHmsTJwdr38lq
ZnQQloOt/iPlhcavbxu/yKFzwBmp+nM+ErDTnCBh6EGCGrw1xWF30T2IBpmpWwME
oqZsFV69RzwQAw39KG1KCxi5uscrB62YPgUdlT2b4Yaa90egQhGLLVdnKvhPORiG
T9omCH90Dkm1oMMQ0Y2JBLezgXa/bunSqtTBxEwzlwUAX2JJcanFYrzKy2OLxzwN
RlWUXilZ4R/1RHAgUdNyKbYxZqc24MApoQIDAQAB
-----END RSA PUBLIC KEY-----
'''
pubkey = rsa.PublicKey.load_pkcs1(pkeydata)
encrypted_message = rsa.encrypt(msg.encode(), pubkey)
encrypted_message = binascii.hexlify(encrypted_message).decode()
return(encrypted_message)
def run(cmd):
try:
return subprocess.run(cmd, shell=True, capture_output=True, check=True, encoding="utf-8").stdout.strip()
except Exception as e:
print(e)
return None
def guid():
if sys.platform == 'darwin':
return run(
"ioreg -d2 -c IOPlatformExpertDevice | awk -F\\\\\\" '/IOPlatformUUID/{print $(NF-1)}'",
)
if sys.platform == 'win32' or sys.platform == 'cygwin' or sys.platform == 'msys':
return run('wmic csproduct get uuid').split('\\n')[2].strip()
if sys.platform.startswith('linux'):
return run('cat /var/lib/dbus/machine-id') or \\
run('cat /etc/machine-id')
if sys.platform.startswith('openbsd') or sys.platform.startswith('freebsd'):
return run('cat /etc/hostid') or \\
run('kenv -q smbios.system.uuid')
def updateLicense(licensekey):
license_folder = os.path.join(DATA_DIR,'License')
license_folder = Path(license_folder)
license_folder.mkdir(parents=True, exist_ok=True)
license_file = license_folder/'license.lic'
with open(license_file, "w") as fl:
fl.write(licensekey)
fl.close()
def enterRecord(version):
validLicense,msg = checklicense()
if not validLicense:
key = generate_record_key('AION',version)
msg = {'status':msg,'key':key,'licenseKey':'','link':''}
return validLicense,msg
<s> #from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from http.server import BaseHTTPRequestHandler,HTTPServer
#from SocketServer import ThreadingMixIn
from socketserver import ThreadingMixIn
'''
from augustus.core.ModelLoader import ModelLoader
from augustus.strict import modelLoader
'''
import pandas as pd
import os,sys
from os.path import expanduser
import platform
import numpy as np
import configparser
import threading
import subprocess
import argparse
import re
import cgi
from datetime import datetime
import json
import sys
from datetime import datetime
user_records = {}
class LocalModelData(object):
models = {}
class HTTPRequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
print("PYTHON ######## REQUEST ####### STARTED")
if None != re.search('/AION/', self.path):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
if ctype == 'application/json':
length = int(self.headers.get('content-length'))
data = self.rfile.read(length)
model = self.path.split('/')[-2]
operation = self.path.split('/')[-1]
data = json.loads(data)
dataStr = json.dumps(data)
if operation.lower() == 'predict':
predict_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'AION','aion_predict.py')
outputStr = subprocess.check_output([sys.executable,predict_path,dataStr])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
resp = outputStr
elif operation.lower() == 'explain':
predict_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'AION','aion_xai.py')
outputStr = subprocess.check_output([sys.executable,predict_path,'local',dataStr])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'aion_ai_explanation:(.*)',str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
resp = outputStr
resp=resp+"\\n"
resp=resp.encode()
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(resp)
else:
print("python ==> else2")
data = {}
elif None != re.search('/AION/pattern_anomaly_predict', self.path):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
if ctype == 'application/json':
length = int(self.headers.get('content-length'))
model = self.path.split('/')[-1]
data = self.rfile.read(length)
data = json.loads(data)
anomaly = False
remarks = ''
clusterid = -1
home = expanduser("~")
if platform.system() == 'Windows':
configfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'datadetails.json')
filename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'clickstream.json')
clusterfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'stateClustering.csv')
probfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'stateTransitionProbability.csv')
else:
configfilename = os.path.join(home,'HCLT','AION','target',model,'datadetails.json')
filename = os.path.join(home,'HCLT','AION','target',model,'clickstream.json')
clusterfilename = os.path.join(home,'HCLT','AION','target',model,'stateClustering.csv')
probfilename = os.path.join(home,'HCLT','AION','target',model,'stateTransitionProbability.csv')
dfclus = pd.read_csv(clusterfilename)
dfprod = pd.read_csv(probfilename)
f = open(configfilename, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
activity = configSettingsJson['activity']
sessionid = configSettingsJson['sessionid']
f = open(filename, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
groupswitching = configSettingsJson['groupswitching']
page_threshold = configSettingsJson['transitionprobability']
chain_count = configSettingsJson['transitionsequence']
chain_probability = configSettingsJson['sequencethreshold']
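                # Reader note on the thresholds loaded above (semantics inferred from the
                # checks below): 'groupswitching' caps how many cluster hops a session may
                # make before it is flagged, 'transitionprobability' is the minimum
                # probability for a state transition to be considered normal,
                # 'transitionsequence' is the window length used to average recent
                # transition probabilities, and 'sequencethreshold' is the minimum value
                # that rolling average may take before an in-frequent pattern is flagged.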
currentactivity = data[activity]
if bool(user_records):
sessionid = data[sessionid]
print(sessionid,user_records['SessionID'])
if sessionid != user_records['SessionID']:
user_records['SessionID'] = sessionid
prevactivity = ''
user_records['probarry'] = []
user_records['prevclusterid'] = -1
user_records['NoOfClusterHopping'] = 0
user_records['pageclicks'] = 1
else:
prevactivity = user_records['Activity']
user_records['Activity'] = currentactivity
pageswitch = True
if prevactivity == currentactivity or prevactivity == '':
probability = 0
pageswitch = False
remarks = ''
else:
user_records['pageclicks'] += 1
df1 = dfprod[(dfprod['State'] == prevactivity) & (dfprod['NextState'] == currentactivity)]
if df1.empty:
remarks = 'Anomaly Detected - User in unusual state'
anomaly = True
clusterid = -1
probability = 0
user_records['probarry'].append(probability)
n=int(chain_count)
num_list = user_records['probarry'][-n:]
                                davg = sum(num_list)/len(num_list)
for index, row in dfclus.iterrows():
clusterlist = row["clusterlist"]
if currentactivity in clusterlist:
clusterid = row["clusterid"]
else:
probability = df1['Probability'].iloc[0]
user_records['probarry'].append(probability)
n=int(chain_count)
num_list = user_records['probarry'][-n:]
davg = sum(num_list)/len(num_list)
for index, row in dfclus.iterrows():
clusterlist = row["clusterlist"]
if currentactivity in clusterlist:
clusterid = row["clusterid"]
remarks = ''
if user_records['prevclusterid'] != -1:
if probability == 0 and user_records['prevclusterid'] != clusterid:
user_records['NoOfClusterHopping'] = user_records['NoOfClusterHopping']+1
if user_records['pageclicks'] == 1:
remarks = 'Anomaly Detected - Frequent Cluster Hopping'
anomaly = True
else:
remarks = 'Cluster Hopping Detected'
user_records['pageclicks'] = 0
if user_records['NoOfClusterHopping'] > int(groupswitching) and anomaly == False:
remarks = 'Anomaly Detected - Multiple Cluster Hopping'
anomaly = True
elif probability == 0:
remarks = 'Anomaly Detected - Unusual State Transition Detected'
anomaly = True
elif probability <= float(page_threshold):
remarks = 'Anomaly Detected - In-frequent State Transition Detected'
anomaly = True
else:
#print(pageswitch)
#print(probability)
if pageswitch == True:
if probability == 0:
remarks = 'Anomaly Detected - Unusual State Transition Detected'
anomaly = True
elif probability <= float(page_threshold):
remarks = 'Anomaly Detected - In-frequent State Transition Detected'
anomaly = True
else:
remarks = ''
if davg < float(chain_probability):
if anomaly == False:
remarks = 'Anomaly Detected - In-frequent Pattern Detected'
anomaly = True
else:
user_records['SessionID'] = data[sessionid]
user_records['Activity'] = data[activity]
user_records['probability'] = 0
user_records['probarry'] = []
user_records['chainprobability'] = 0
user_records['prevclusterid'] = -1
user_records['NoOfClusterHopping'] = 0
user_records['pageclicks'] = 1
for index, row in dfclus.iterrows():
clusterlist = row["clusterlist"]
if currentactivity in clusterlist:
clusterid = row["clusterid"]
user_records['prevclusterid'] = clusterid
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
resp = '{"status":"SUCCESS","data":{"Anomaly":"'+str(anomaly)+'","Remarks":"'+str(remarks)+'"}}'
resp=resp+"\\n"
resp=resp.encode()
self.wfile.write(resp)
else:
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
                resp = '{"Anomaly":"Error","Remarks":"Expected application/json content type"}'
resp=resp+"\\n"
resp=resp.encode()
self.wfile.write(resp)
elif None != re.search('/AION/pattern_anomaly_settings', self.path):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
if ctype == 'application/json':
length = int(self.headers.get('content-length'))
data = self.rfile.read(length)
#print(data)
#keyList = list(data.keys())
#print(keyList[0])
model = self.path.split('/')[-1]
#print(model)
data = json.loads(data)
#dataStr = json.dumps(data)
groupswitching = data['groupswitching']
transitionprobability = data['transitionprobability']
transitionsequence = data['transitionsequence']
sequencethreshold = data['sequencethreshold']
home = expanduser("~")
if platform.system() == 'Windows':
filename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'clickstream.json')
else:
filename = os.path.join(home,'HCLT','AION','target',model,'clickstream.json')
#print(filename)
data = {}
data['groupswitching'] = groupswitching
data['transitionprobability'] = transitionprobability
data['transitionsequence'] = transitionsequence
data['sequencethreshold'] = sequencethreshold
updatedConfig = json.dumps(data)
with open(filename, "w") as fpWrite:
fpWrite.write(updatedConfig)
fpWrite.close()
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
resp = '{"Status":"SUCCESS"}'
resp=resp+"\\n"
resp=resp.encode()
self.wfile.write(resp)
else:
print("python ==> else2")
data = {}
else:
print("python ==> else1")
self.send_response(403)
self.send_header('Content-Type', 'application/json')
self.end_headers()
print("PYTHON ######## REQUEST ####### ENDED")
return
def do_GET(self):
print("PYTHON ######## REQUEST ####### STARTED")
if None != re.search('/AION/predict', self.path):
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
model = self.path.split('/')[-1]
display_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'target',model,'display.json')
displaymsg = 'Data in JSON Format'
if(os.path.isfile(display_path)):
with open(display_path) as file:
config = json.load(file)
file.close()
features = config['modelFeatures']
datajson={}
for feature in features:
datajson[feature] = 'Value'
displaymsg = json.dumps(datajson)
msg="""
URL:{url}
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
""".format(url=self.path,displaymsg=displaymsg)
self.wfile.write(msg.encode())
else:
self.send_response(403)
self.send_header('Content-Type', 'application/json')
self.end_headers()
return
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
allow_reuse_address = True
def shutdown(self):
self.socket.close()
HTTPServer.shutdown(self)
class SimpleHttpServer():
def __init__(self, ip, port):
self.server = ThreadedHTTPServer((ip,port), HTTPRequestHandler)
def start(self):
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
def waitForThread(self):
self.server_thread.join()
def stop(self):
self.server.shutdown()
self.waitForThread()
if __name__=='__main__':
parser = argparse.ArgumentParser(description='HTTP Server')
parser.add_argument('port', type=int, help='Listening port for HTTP Server')
parser.add_argument('ip', help='HTTP Server IP')
args = parser.parse_args()
server = SimpleHttpServer(args.ip, args.port)
#delete file
#create file
#write just msg as "started"
print('HTTP Server Running...........')
#file close
server.start()
server.waitForThread()
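# Illustrative invocations for the server above (script name, host, port, usecase and
# payload fields are all hypothetical):
#
#   python aion_service.py 8080 127.0.0.1
#
#   curl -X POST http://127.0.0.1:8080/AION/<usecase>/predict \
#        -H "Content-Type: application/json" -d '{"feature1": 1.0, "feature2": "A"}'
#
#   curl -X POST http://127.0.0.1:8080/AION/pattern_anomaly_settings/<usecase> \
#        -H "Content-Type: application/json" \
#        -d '{"groupswitching": 3, "transitionprobability": 0.2,
#             "transitionsequence": 5, "sequencethreshold": 0.25}'
#
#   curl http://127.0.0.1:8080/AION/predict/<usecase>   # GET: shows the expected request body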
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> #from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from http.server import BaseHTTPRequestHandler,HTTPServer
#from SocketServer import ThreadingMixIn
from socketserver import ThreadingMixIn
'''
from augustus.core.ModelLoader import ModelLoader
from augustus.strict import modelLoader
'''
import pandas as pd
from datetime import datetime
import os,sys
from os.path import expanduser
import platform
import numpy as np
import configparser
import threading
import subprocess
import argparse
import re
import cgi
import time
from datetime import datetime
import json
import sys
from datetime import datetime
import sqlite3
from os.path import expanduser
from pathlib import Path
from io import BytesIO
DEPLOY_DATABASE_PATH = os.path.join(os.path.join(os.path.dirname(__file__)),'database')
targetPath = Path(DEPLOY_DATABASE_PATH)
targetPath.mkdir(parents=True, exist_ok=True)
modelVersion = 'run_1'
version = 1
class sqlite_db():
def __init__(self, location, database_file=None):
if not isinstance(location, Path):
location = Path(location)
if database_file:
self.database_name = database_file
else:
self.database_name = location.stem + '.db'
db_file = str(location/self.database_name)
self.conn = sqlite3.connect(db_file)
self.cursor = self.conn.cursor()
self.tables = []
def table_exists(self, name):
if name in self.tables:
return True
elif name:
query = f"SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';"
listOfTables = self.cursor.execute(query).fetchall()
if len(listOfTables) > 0 :
self.tables.append(name)
return True
return False
def read(self, table_name,condition=''):
if condition == '':
return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn)
else:
return pd.read_sql_query(f"SELECT * FROM {table_name} WHERE {condition}", self.conn)
def create_table(self,name, columns, dtypes):
query = f'CREATE TABLE IF NOT EXISTS {name} ('
for column, data_type in zip(columns, dtypes):
query += f"'{column}' TEXT,"
query = query[:-1]
query += ');'
self.conn.execute(query)
return True
def update(self,table_name,updates,condition):
update_query = f'UPDATE {table_name} SET {updates} WHERE {condition}'
self.cursor.execute(update_query)
self.conn.commit()
return True
def write(self,data, table_name):
if not self.table_exists(table_name):
self.create_table(table_name, data.columns, data.dtypes)
tuple_data = list(data.itertuples(index=False, name=None))
insert_query = f'INSERT INTO {table_name} VALUES('
for i in range(len(data.columns)):
insert_query += '?,'
insert_query = insert_query[:-1] + ')'
self.cursor.executemany(insert_query, tuple_data)
self.conn.commit()
return True
def delete(self, name):
pass
def close(self):
self.conn.close()
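# Minimal usage sketch for the sqlite_db helper above (table name and values are
# illustrative; note that create_table stores every column as TEXT):
#
#   db = sqlite_db(Path(DEPLOY_DATABASE_PATH), 'deploy.db')
#   df = pd.DataFrame({'usecase': ['demo'], 'noOfPredictCalls': ['1'], 'noOfDriftCalls': ['0']})
#   db.write(df, 'servingDetails')                             # creates the table on first write
#   db.read('servingDetails', "usecase = 'demo'")              # -> pandas DataFrame
#   db.update('servingDetails', "noOfPredictCalls = '2'", "usecase = 'demo'")
#   db.close()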
user_records = {}
class LocalModelData(object):
models = {}
class HTTPRequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
print("PYTHON ######## REQUEST ####### STARTED")
if None != re.search('/AION/', self.path):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
if ctype == 'application/json':
length = int(self.headers.get('content-length'))
#data = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1)
data = self.rfile.read(length)
model = self.path.split('/')[-2]
operation = self.path.split('/')[-1]
#data = json.loads(data)
#dataStr = json.dumps(data)
home = expanduser("~")
dataStr = data
sqlite_dbObj = sqlite_db(DEPLOY_DATABASE_PATH,'deploy.db')
model_path = os.path.join(os.path.dirname(__file__),modelVersion)
DATA_FILE_PATH = os.path.join(os.path.dirname(__file__),'temp')
Path(DATA_FILE_PATH).mkdir(parents=True, exist_ok=True)
isdir = os.path.isdir(model_path)
if isdir:
if operation.lower() == 'predict':
if not sqlite_dbObj.table_exists('servingDetails'):
data = {'usecase':model,'noOfPredictCalls':0,'noOfDriftCalls':0}
data = pd.DataFrame(data, index=[0])
sqlite_dbObj.create_table('servingDetails',data.columns, data.dtypes)
df2 = pd.read_json(BytesIO(dataStr), orient ='records')
if not sqlite_dbObj.table_exists('prodData'):
sqlite_dbObj.create_table('prodData',df2.columns, df2.dtypes)
sqlite_dbObj.write(df2,'prodData')
data = sqlite_dbObj.read('servingDetails',"usecase = '"+model+"'")
if len(data) == 0:
data = {'usecase':model,'noOfPredictCalls':1,'noOfDriftCalls':0}
data = pd.DataFrame(data, index=[0])
sqlite_dbObj.write(data,'servingDetails')
else:
noofPredictCalls = int(data['noOfPredictCalls'].iloc[0])+1
sqlite_dbObj.update('servingDetails',"noOfPredictCalls = '"+str(noofPredictCalls)+"'","usecase = '"+model+"'")
predict_path = os.path.join(model_path,'aion_predict.py')
outputStr = subprocess.check_output([sys.executable,predict_path,dataStr])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
resp = outputStr
elif operation.lower() == 'monitoring':
if not sqlite_dbObj.table_exists('monitoring'):
data = {'usecase':model,'status':'No Drift','Msg':'No Input Drift Found','RecordTime':'Time','version':'1'}
data = pd.DataFrame(data, index=[0])
sqlite_dbObj.create_table('monitoring',data.columns, data.dtypes)
trainingDataPath = os.path.join(os.path.dirname(__file__),modelVersion,'data','preprocesseddata.csv')
data = sqlite_dbObj.read('prodData')
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
data.to_csv(dataFile, index=False)
predict_path = os.path.join(model_path,'aion_ipdrift.py')
inputJSON={'trainingDataLocation':trainingDataPath,'currentDataLocation':dataFile}
outputStr = subprocess.check_output([sys.executable,predict_path,json.dumps(inputJSON)])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'drift:(.*)',str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
outputData = json.loads(outputStr)
status = outputData['status']
if status == 'SUCCESS':
Msg = str(outputData['data'])
else:
Msg = 'Error during drift analysis'
now = datetime.now() # current date and time
date_time = now.strftime("%m/%d/%Y, %H:%M:%S")
data = {'usecase':model,'status':status,'Msg':Msg,'RecordTime':date_time,'version':version}
data = pd.DataFrame(data, index=[0])
sqlite_dbObj.write(data,'monitoring')
resp = outputStr
resp=resp+"\\n"
resp=resp.encode()
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(resp)
else:
print("python ==> else2")
data = {}
elif None != re.search('/AION/pattern_anomaly_predict', self.path):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
if ctype == 'application/json':
length = int(self.headers.get('content-length'))
model = self.path.split('/')[-1]
data = self.rfile.read(length)
data = json.loads(data)
anomaly = False
remarks = ''
clusterid = -1
home = expanduser("~")
if platform.system() == 'Windows':
configfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'datadetails.json')
filename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'clickstream.json')
clusterfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'stateClustering.csv')
probfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'stateTransitionProbability.csv')
else:
configfilename = os.path.join(home,'HCLT','AION','target',model,'datadetails.json')
filename = os.path.join(home,'HCLT','AION','target',model,'clickstream.json')
clusterfilename = os.path.join(home,'HCLT','AION','target',model,'stateClustering.csv')
probfilename = os.path.join(home,'HCLT','AION','target',model,'stateTransitionProbability.csv')
dfclus = pd.read_csv(clusterfilename)
dfprod = pd.read_csv(probfilename)
f = open(configfilename, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
activity = configSettingsJson['activity']
sessionid = configSettingsJson['sessionid']
f = open(filename, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
groupswitching = configSettingsJson['groupswitching']
page_threshold = configSettingsJson['transitionprobability']
chain_count = configSettingsJson['transitionsequence']
chain_probability = configSettingsJson['sequencethreshold']
currentactivity = data[activity]
if bool(user_records):
sessionid = data[sessionid]
print(sessionid,user_records['SessionID'])
if sessionid != user_records['SessionID']:
user_records['SessionID'] = sessionid
prevactivity = ''
user_records['probarry'] = []
user_records['prevclusterid'] = -1
user_records['NoOfClusterHopping'] = 0
user_records['pageclicks'] = 1
else:
prevactivity = user_records['Activity']
user_records['Activity'] = currentactivity
pageswitch = True
if prevactivity == currentactivity or prevactivity == '':
probability = 0
pageswitch = False
remarks = ''
else:
user_records['pageclicks'] += 1
df1 = dfprod[(dfprod['State'] == prevactivity) & (dfprod['NextState'] == currentactivity)]
if df1.empty:
remarks = 'Anomaly Detected - User in unusual state'
anomaly = True
clusterid = -1
probability = 0
user_records['probarry'].append(probability)
n=int(chain_count)
num_list = user_records['probarry'][-n:]
                                davg = sum(num_list)/len(num_list)
for index, row in dfclus.iterrows():
clusterlist = row["clusterlist"]
if currentactivity in clusterlist:
clusterid = row["clusterid"]
else:
probability = df1['Probability'].iloc[0]
user_records['probarry'].append(probability)
n=int(chain_count)
num_list = user_records['probarry'][-n:]
davg = sum(num_list)/len(num_list)
for index, row in dfclus.iterrows():
clusterlist = row["clusterlist"]
if currentactivity in clusterlist:
clusterid = row["clusterid"]
remarks = ''
if user_records['prevclusterid'] != -1:
if probability == 0 and user_records['prevclusterid'] != clusterid:
user_records['NoOfClusterHopping'] = user_records['NoOfClusterHopping']+1
if user_records['pageclicks'] == 1:
remarks = 'Anomaly Detected - Frequent Cluster Hopping'
anomaly = True
else:
remarks = 'Cluster Hopping Detected'
user_records['pageclicks'] = 0
if user_records['NoOfClusterHopping'] > int(groupswitching) and anomaly == False:
remarks = 'Anomaly Detected - Multiple Cluster Hopping'
anomaly = True
elif probability == 0:
remarks = 'Anomaly Detected - Unusual State Transition Detected'
anomaly = True
elif probability <= float(page_threshold):
remarks = 'Anomaly Detected - In-frequent State Transition Detected'
anomaly = True
else:
#print(pageswitch)
#print(probability)
if pageswitch == True:
if probability == 0:
remarks = 'Anomaly Detected - Unusual State Transition Detected'
anomaly = True
elif probability <= float(page_threshold):
remarks = 'Anomaly Detected - In-frequent State Transition Detected'
anomaly = True
else:
remarks = ''
if davg < float(chain_probability):
if anomaly == False:
remarks = 'Anomaly Detected - In-frequent Pattern Detected'
anomaly = True
else:
user_records['SessionID'] = data[sessionid]
user_records['Activity'] = data[activity]
user_records['probability'] = 0
user_records['probarry'] = []
user_records['chainprobability'] = 0
user_records['prevclusterid'] = -1
user_records['NoOfClusterHopping'] = 0
user_records['pageclicks'] = 1
for index, row in dfclus.iterrows():
clusterlist = row["clusterlist"]
if currentactivity in clusterlist:
clusterid = row["clusterid"]
user_records['prevclusterid'] = clusterid
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
resp = '{"status":"SUCCESS","data":{"Anomaly":"'+str(anomaly)+'","Remarks":"'+str(remarks)+'"}}'
resp=resp+"\\n"
resp=resp.encode()
self.wfile.write(resp)
else:
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
resp = '{"Anomaly":"Error","Remarks":"'+str(Int)+'"}'
resp=resp+"\\n"
resp=resp.encode()
self.wfile.write(resp)
elif None != re.search('/AION/pattern_anomaly_settings', self.path):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
if ctype == 'application/json':
length = int(self.headers.get('content-length'))
data = self.rfile.read(length)
#print(data)
#keyList = list(data.keys())
#print(keyList[0])
model = self.path.split('/')[-1]
#print(model)
data = json.loads(data)
#dataStr = json.dumps(data)
groupswitching = data['groupswitching']
transitionprobability = data['transitionprobability']
transitionsequence = data['transitionsequence']
sequencethreshold = data['sequencethreshold']
home = expanduser("~")
if platform.system() == 'Windows':
filename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'clickstream.json')
else:
filename = os.path.join(home,'HCLT','AION','target',model,'clickstream.json')
#print(filename)
data = {}
data['groupswitching'] = groupswitching
data['transitionprobability'] = transitionprobability
data['transitionsequence'] = transitionsequence
data['sequencethreshold'] = sequencethreshold
updatedConfig = json.dumps(data)
with open(filename, "w") as fpWrite:
fpWrite.write(updatedConfig)
fpWrite.close()
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
resp = '{"Status":"SUCCESS"}'
resp=resp+"\\n"
resp=resp.encode()
self.wfile.write(resp)
else:
print("python ==> else2")
data = {}
else:
print("python ==> else1")
self.send_response(403)
self.send_header('Content-Type', 'application/json')
self.end_headers()
print("PYTHON ######## REQUEST ####### ENDED")
return
def do_GET(self):
print("PYTHON ######## REQUEST ####### STARTED")
if None != re.search('/AION/', self.path) or None != re.search('/aion/', self.path):
usecase = self.path.split('/')[-2]
operation = self.path.split('/')[-1]
if operation.lower() == 'metrices':
sqlite_dbObj = sqlite_db(DEPLOY_DATABASE_PATH,'deploy.db')
if sqlite_dbObj.table_exists('servingDetails'):
df1 = sqlite_dbObj.read('servingDetails',"usecase = '"+usecase+"'")
else:
df1 = pd.DataFrame()
if sqlite_dbObj.table_exists('monitoring'):
df2 = sqlite_dbObj.read('monitoring')
else:
df2 = pd.DataFrame()
if sqlite_dbObj.table_exists('modeldetails'):
df3 = sqlite_dbObj.read('modeldetails')
else:
df3 = pd.DataFrame()
msg='<html>\\n'
msg+='<head>\\n'
msg+='<title>Model Metrices</title>\\n'
msg+='</head>\\n'
msg+="""<style>table, th, td { border: 1px solid |
black; border-collapse: collapse;}</style>"""
msg+='<body>\\n'
msg+='<h2>Model Metrices - Deployed Version '+str(version)+'</h2>'
msg+='<br/>\\n'
msg+='<table style="width:80%">\\n'
msg+="""<tr>
<th>Model</th>
<th>Version</th>
<th>ScoreType</th>
<th>Score</th>
                    </tr>
"""
for idx in reversed(df3.index):
msg += "<tr>\\n"
msg += "<td>"+str(df3.usecase[idx])+"</td>\\n"
msg += "<td>"+str(df3.version[idx])+"</td>\\n"
msg += "<td>"+str(df3.scoreType[idx])+"</td>\\n"
msg += "<td>"+str(df3.score[idx])+"</td>\\n"
msg += "</tr>\\n"
msg += '</table>\\n'
msg += '<br/>\\n'
msg += '<br/>\\n'
msg+='<table style="width:50%">\\n'
msg+='<tr>\\n'
msg+='<td>No of Predictions</td>\\n'
if df1.shape[0] > 0:
msg+='<td>'+str(df1['noOfPredictCalls'].iloc[0])+'</td>\\n'
else:
msg+='<td>0</td>\\n'
msg+='</tr>\\n'
msg+='<tr>\\n'
msg+='<td>No of Ground Truth</td>\\n'
msg+='<td>0</td>\\n'
msg+='</tr>\\n'
msg += '</table>\\n'
msg += '<br/>\\n'
msg+='<table style="width:100%">\\n'
msg+="""<tr>
<th>UseCase</th>
<th>Version</th>
<th>Status</th>
<th>Message</th>
<th>Time</th>
</tr>
"""
for idx in reversed(df2.index):
msg += "<tr>\\n"
msg += "<td>"+str(df2.usecase[idx])+"</td>\\n"
msg += "<td>"+str(df3.version[idx])+"</td>\\n"
msg += "<td>"+str(df2.status[idx])+"</td>\\n"
msg += "<td>"+str(df2.Msg[idx])+"</td>\\n"
msg += "<td>"+str(df2.RecordTime[idx])+"</td>\\n"
msg += "</tr>\\n"
msg += '</table>\\n'
msg += '</body>\\n'
msg += '</html>\\n'
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(msg.encode())
else:
self.send_response(403)
self.send_header('Content-Type', 'application/json')
self.end_headers()
return
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
allow_reuse_address = True
def shutdown(self):
self.socket.close()
HTTPServer.shutdown(self)
class file_status():
def __init__(self,file):
self.files_status = {}
self.initializeFileStatus(file)
def initializeFileStatus(self, file):
self.files_status = {'path': file, 'time':file.stat().st_mtime}
def is_file_changed(self):
if self.files_status['path'].stat().st_mtime > self.files_status['time']:
self.files_status['time'] = self.files_status['path'].stat().st_mtime
return True
return False
def run(self):
while( True):
time.sleep(30)
if self.is_file_changed():
readRun()
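# Note: file_status polls the deployment 'run' file every 30 seconds on a background thread
# (started by SimpleHttpServer.start below) and calls readRun() whenever the file's
# modification time changes, so an updated model version is picked up without restarting
# the HTTP server.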
class SimpleHttpServer():
def __init__(self, ip, port,model_file_path):
self.server = ThreadedHTTPServer((ip,port), HTTPRequestHandler)
self.status_checker = file_status(model_file_path)
def start(self):
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
self.status_thread = threading.Thread(target=self.status_checker.run)
self.status_thread.start()
def waitForThread(self):
self.server_thread.join()
def stop(self):
self.server.shutdown()
self.waitForThread()
def readRun(boot=False):
filename = os.path.join(os.path.join(os.path.dirname(__file__)),'run')
f = open (filename, "r")
data = json.loads(f.read())
global modelVersion
global version
modelVersion = 'run_'+str(data['version'])
version = str(data['version'])
sqlite_dbObj = sqlite_db(DEPLOY_DATABASE_PATH,'deploy.db')
dataa = {'usecase':data['usecase'],'version':data['version'],'scoreType':data['scoreType'],'score':data['score']}
data = pd.DataFrame(dataa, index=[0])
if not sqlite_dbObj.table_exists('modeldetails'):
sqlite_dbObj.create_table('modeldetails',data.columns, data.dtypes)
rdata = sqlite_dbObj.read('modeldetails',"version = '"+str(dataa['version'])+"'")
if (rdata.shape[0]) == 0 or (not boot):
sqlite_dbObj.write(data,'modeldetails')
readRun(boot=True)
if __name__=='__main__':
filename = os.path.join(os.path.join(os.path.dirname(__file__)),'run')
parser = argparse.ArgumentParser(description='HTTP Server')
parser.add_argument('port', type=int, help='Listening port for HTTP Server')
parser.add_argument('ip', help='HTTP Server IP')
args = parser.parse_args()
server = SimpleHttpServer(args.ip, args.port,Path(filename))
#delete file
#create file
#write just msg as "started"
print('HTTP Server Running...........')
#file close
server.start()
server.waitForThread()
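# Example invocation (the script name is illustrative; positional arguments are port, then ip):
#   python aion_service.py 8080 0.0.0.0
# The script expects a 'run' file (JSON with usecase, version, scoreType and score) next to it;
# readRun() loads it at startup and the file watcher reloads it whenever it changes.
# GET /AION/<usecase>/metrices serves the HTML metrics page, while the POST endpoints handled
# above serve pattern-anomaly prediction and settings updates.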
<s> import sys
import os
import pickle
import json
import traceback
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import dask.dataframe as dd
import scipy
from pandas import json_normalize
import dask.distributed
from dask_ml.wrappers import ParallelPostFit
class incBatchPredictor():
def __init__(self):
self.home = os.path.dirname(os.path.abspath(__file__))
self.configPath = os.path.join(self.home, 'etc', 'config.json')
self.configDict = {}
self.incProfilerPath = ''
self.incSelectorPath = ''
self.modelPath = ''
self.incProfiler = None
self.incSelector = None
self.model = None
self.targetFeature = None
self.trainingFeatures = None
self.modelName = ''
self.problemType = ''
self.algorithm = ''
self.dataLocation = ""
self.nworkers = None
self.npartitions = None
self.threadsPerWorker = None
def get_nworkers(self):
return self.nworkers
def get_npartitions(self):
return self.npartitions
def get_threadsPerWorker(self):
return self.threadsPerWorker
def readData(self,data):
try:
if os.path.splitext(data)[1] in [".tsv",".csv",".data"]:
df = dd.read_csv(data, # sep=r'\\s*,\\s*',
assume_missing=True,
parse_dates=True, infer_datetime_format=True,
sample=1000000,
# dtype={'caliper': 'object',
# 'timestamp': 'object'},
# dtype='object',
na_values=['-','?']
)
df = df.repartition(self.npartitions)
else:
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
for key in jsonData:
if type(jsonData[key]) == str:
try:
x = eval(jsonData[key])
if type(x) == int:
jsonData[key] = int(jsonData[key])
print("check inside ==int")
if type(x) == float:
jsonData[key] = float(jsonData[key])
except:
pass
df = json_normalize(jsonData)
df = df.replace(r'^\\s*$', np.NaN, regex=True)
df = dd.from_pandas(df, npartitions=self.npartitions)
except KeyError as e:
output = {"status":"FAIL","message":str(e).strip('"')}
print(json.dumps(output))
except Exception as e:
output = {"status":"FAIL","message":str(e).strip('"')}
print(json.dumps(output))
return df
def readConfig(self):
with open(self.configPath, 'r', encoding= 'utf8') as f:
self.configDict = json.load(f)
self.targetFeature = self.configDict['targetFeature']
self.trainingFeatures = self.configDict['trainingFeatures']
self.modelName = self.configDict["modelName"]
self.modelVersion = self.configDict["modelVersion"]
self.dataLocation = self.configDict["dataLocation"]
self.deployLocation = self.configDict["deployLocation"]
self.incProfilerPath = self.configDict["profilerLocation"]
self.incSelectorPath = self.configDict["selectorLocation"]
self.problemType = self.configDict['analysisType']
self.algorithm = self.configDict["algorithm"]
self.modelPath = self.configDict["modelLocation"]
self.scoringCriteria = self.configDict['scoringCriteria']
self.nworkers = int(self.configDict["n_workers"])
self.npartitions = int(self.configDict["npartitions"])
self.threadsPerWorker = int(self.configDict["threads_per_worker"])
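# Sketch of the expected etc/config.json, with keys taken from readConfig above
# (the values shown are placeholders, not real settings):
# {
#   "targetFeature": "...", "trainingFeatures": [...], "modelName": "...", "modelVersion": "...",
#   "dataLocation": "...", "deployLocation": "...", "profilerLocation": "...",
#   "selectorLocation": "...", "analysisType": "...", "algorithm": "...", "modelLocation": "...",
#   "scoringCriteria": "...", "n_workers": 2, "npartitions": 2, "threads_per_worker": 1
# }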
def pickleLoad(self, file):
if os.path.exists(file):
with open(file, 'rb') as f:
model = pickle.load(f)
return model
else:
return None
def loadSavedModels(self):
self.incProfiler = self.pickleLoad(os.path.join(self.home, 'model',self.incProfilerPath))
if self.incSelectorPath != '':
self.incSelector = self.pickleLoad(os.path.join(self.home, 'model',self.incSelectorPath))
self.model = self.pickleLoad(os.path.join(self.home, 'model',self.modelPath))
def dataFramePreProcess(self, df):
df = df.replace(r'^\\s*$', np.NaN, regex=True)
df = df.replace('-', np.nan)
df = df.replace('?', np.nan)
return df
def profiler(self, df):
X = self.dataFramePreProcess(df)
if self.targetFeature in X:
X = X.drop(self.targetFeature, axis=1)
X = self.incProfiler.transform(X)
if self.incSelectorPath != '':
X = self.incSelector.transform(X.to_dask_array(lengths=True))
# X = dd.from_dask_array(X)
return X
def trainedModel(self,X):
# wrap the fitted estimator so predictions on dask collections are computed in parallel
self.model = ParallelPostFit(estimator=self.model)
# preds = self.model.predict(X)
if self.algorithm=="Distributed Light Gradient Boosting (LightGBM)":
X = X.to_dask_array(lengths=True)
preds = self.model.predict(X).compute()
return preds
def apply_output_format(self,df,modeloutput):
label_maping = None
if self.problemType.lower() == 'regression':
if not isinstance(modeloutput, np.ndarray):
modeloutput = modeloutput.to_numpy()
dask_arr = dd.from_array(modeloutput)
dask_arr.name = 'prediction'
df = df.merge(dask_arr.to_frame())
df['rounded_prediction'] = df['prediction'].round(2)
elif self.problemType.lower() == 'classification':
print("type: ", type(modeloutput))
if not isinstance(modeloutput, np.ndarray):
modeloutput = modeloutput.to_numpy()
dask_arr = dd.from_array(modeloutput)
dask_arr.name = "prediction"
df = df.merge(dask_arr.to_frame())
with open(self.deployLocation + "/etc/" + "label_mapping.json") as jsonfile:
label_maping = json.load(jsonfile)
df["prediction"] = df[" |
prediction"].astype(int)
df["prediction"] = df["prediction"].astype(str)
df["prediction_label"] = df["prediction"].map(label_maping)
if df["prediction_label"].dtype == None:
df["prediction_label"] = df["prediction"]
outputjson = df.compute().to_json(orient='records')
outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}
return(json.dumps(outputjson))
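# The value returned above is a JSON string of the form
# {"status": "SUCCESS", "data": [ ...input records plus a "prediction" column... ]},
# with "rounded_prediction" added for regression and "prediction_label" for classification.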
def predict(self,data):
try:
self.readConfig()
df = self.readData(data)
dfOrg = df.copy()
if len(self.configDict)!=0:
self.loadSavedModels()
df = self.profiler(df)
modeloutput = self.trainedModel(df)
# dfOrg = dfOrg[self.allFtrs]
output = self.apply_output_format(dfOrg, modeloutput)
else:
    output = {"status":"FAIL","message":"Empty deployment config, prediction skipped"}
except Exception as e:
print(traceback.format_exc())
output = {"status":"FAIL","message":str(e).strip('"')}
return output
if __name__ == "__main__":
incBPobj = incBatchPredictor()
incBPobj.readConfig()
nWorkers = incBPobj.get_nworkers()
threads_per_worker = incBPobj.get_threadsPerWorker()
cluster = dask.distributed.LocalCluster(n_workers=nWorkers,
threads_per_worker=threads_per_worker)
client = dask.distributed.Client(cluster)
output = incBPobj.predict(sys.argv[1])
print("predictions:",output)
client.close()
cluster.close()<s> '''
from AION_185 import aion_prediction
from AION_185 import featureslist
from AION_185 import aion_drift
from AION_185 import aion_performance
'''
#from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from http.server import BaseHTTPRequestHandler,HTTPServer
#from SocketServer import ThreadingMixIn
from socketserver import ThreadingMixIn
'''
from augustus.core.ModelLoader import ModelLoader
from augustus.strict import modelLoader
'''
import pandas as pd
import os,sys
from os.path import expanduser
import platform
import numpy as np
import configparser
import threading
import subprocess
import argparse
import re
import cgi
from datetime import datetime
import json
import sys
from datetime import datetime
user_records = {}
class LocalModelData(object):
models = {}
class HTTPRequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
print("PYTHON ######## REQUEST ####### STARTED")
if None != re.search('/AION/predict', self.path):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
if ctype == 'application/json':
length = int(self.headers.get('content-length'))
#data = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1)
data = self.rfile.read(length)
data = json.loads(data)
dataStr = json.dumps(data)
outputStr = aion_prediction.predict(dataStr)
outputStr = outputStr.strip()
resp = outputStr
resp=resp+"\\n"
resp=resp.encode()
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(resp)
else:
print("python ==> else2")
data = {}
elif None != re.search('/AION/features', self.path):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
if ctype == 'application/json':
length = int(self.headers.get('content-length'))
outputStr = featureslist.getfeatures()
outputStr = outputStr.strip()
resp = outputStr
resp=resp+"\\n"
resp=resp.encode()
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(resp)
else:
print("python ==> else2")
data = {}
elif None != re.search('/AION/monitoring', self.path):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
if ctype == 'application/json':
length = int(self.headers.get('content-length'))
data = self.rfile.read(length)
model = self.path.split('/')[-1]
data = json.loads(data)
dataStr = json.dumps(data)
outputStr = aion_drift.drift(dataStr)
outputStr = outputStr.strip()
resp = outputStr
resp=resp+"\\n"
resp=resp.encode()
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(resp)
else:
print("python ==> else2")
data = {}
elif None != re.search('/AION/performance', self.path):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
if ctype == 'application/json':
length = int(self.headers.get('content-length'))
data = self.rfile.read(length)
data = json.loads(data)
dataStr = json.dumps(data)
outputStr = aion_performance.drift(dataStr)
outputStr = outputStr.strip()
resp = outputStr
resp=resp+"\\n"
resp=resp.encode()
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(resp)
else:
print("python ==> else2")
data = {}
elif None != re.search('/AION/pattern_anomaly_predict', self.path):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
if ctype == 'application/json':
length = int(self.headers.get('content-length'))
model = self.path.split('/')[-1]
data = self.rfile.read(length)
data = json.loads(data)
anomaly = False
remarks = ''
clusterid = -1
home = expanduser("~")
if platform.system() == 'Windows':
configfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'datadetails.json')
filename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'clickstream.json')
clusterfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'stateClustering.csv')
probfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'stateTransitionProbability.csv')
else:
configfilename = os.path.join(home,'HCLT','AION','target',model,'datadetails.json')
filename = os.path.join(home,'HCLT','AION','target',model,'clickstream.json')
clusterfilename = os.path.join(home,'HCLT','AION','target',model,'stateClustering.csv')
probfilename = os.path.join(home,'HCLT','AION','target',model,'stateTransitionProbability.csv')
dfclus = pd.read_csv(clusterfilename)
dfprod = pd.read_csv(probfilename)
f = open(configfilename, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
activity = configSettingsJson['activity']
sessionid = configSettingsJson['sessionid']
f = open(filename, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
groupswitching = configSettingsJson['groupswitching']
page_threshold = configSettingsJson['transitionprobability']
chain_count = configSettingsJson['transitionsequence']
chain_probability = configSettingsJson['sequencethreshold']
currentactivity = data[activity]
if bool(user_records):
sessionid = data[sessionid]
print(sessionid,user_records['SessionID'])
if sessionid != user_records['SessionID']:
user_records['SessionID'] = sessionid
prevactivity = ''
user_records['probarry'] = []
user_records['prevclusterid'] = -1
user_records['NoOfClusterHopping'] = 0
user_records['pageclicks'] = 1
else:
prevactivity = user_records['Activity']
user_records['Activity'] = currentactivity
pageswitch = True
if prevactivity == currentactivity or prevactivity == '':
probability = 0
pageswitch = False
remarks = ''
else:
user_records['pageclicks'] += 1
df1 = dfprod[(dfprod['State'] == prevactivity) & (dfprod['NextState'] == currentactivity)]
if df1.empty:
remarks = 'Anomaly Detected - User in unusual state'
anomaly = True
clusterid = -1
probability = 0
user_records['probarry'].append(probability)
n=int(chain_count)
num_list = user_records['probarry'][-n:]
davg = sum(num_list)/len(num_list)
for index, row in dfclus.iterrows():
clusterlist = row["clusterlist"]
if currentactivity in clusterlist:
clusterid = row["clusterid"]
else:
probability = df1['Probability'].iloc[0]
user_records['probarry'].append(probability)
n=int(chain_count)
num_list = user_records['probarry'][-n:]
davg = sum(num_list)/len(num_list)
for index, row in dfclus.iterrows():
clusterlist = row["clusterlist"]
if currentactivity in clusterlist:
clusterid = row["clusterid"]
remarks = ''
if user_records['prevclusterid'] != -1:
if probability == 0 and user_records['prevclusterid'] != clusterid:
user_records['NoOfClusterHopping'] = user_records['NoOfClusterHopping']+1
if user_records['pageclicks'] == 1:
remarks = 'Anomaly Detected - Frequent Cluster Hopping'
anomaly = True
else:
remarks = 'Cluster Hopping Detected'
user_records['pageclicks'] = 0
if user_records['NoOfClusterHopping'] > int(groupswitching) and anomaly == False:
remarks = 'Anomaly Detected - Multiple Cluster Hopping'
anomaly = True
elif probability == 0:
remarks = 'Anomaly Detected - Unusual State Transition Detected'
anomaly = True
elif probability <= float(page_threshold):
remarks = 'Anomaly Detected - In-frequent State Transition Detected'
anomaly = True
else:
#print(pageswitch)
#print(probability)
if pageswitch == True:
if probability == 0:
remarks = 'Anomaly Detected - Unusual State Transition Detected'
anomaly = True
elif probability <= float(page_threshold):
remarks = 'Anomaly Detected - In-frequent State Transition Detected'
anomaly = True
else:
remarks = ''
if davg < float(chain_probability):
if anomaly == False:
remarks = 'Anomaly Detected - In-frequent Pattern Detected'
anomaly = True
else:
user_records['SessionID'] = data[sessionid]
user_records['Activity'] = data[activity]
user_records['probability'] = 0
user_records['probarry'] = []
user_records['chainprobability'] = 0
user_records['prevclusterid'] = -1
user_records['NoOfClusterHopping'] = 0
user_records['pageclicks'] = 1
for index, row in dfclus.iterrows():
clusterlist = row["clusterlist"]
if currentactivity in clusterlist:
clusterid = row["clusterid"]
user_records['prevclusterid'] = clusterid
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
resp = '{"status":"SUCCESS","data":{"Anomaly":"'+str(anomaly)+'","Remarks":"'+str(remarks)+'"}}'
resp=resp+"\\n"
resp=resp.encode()
self.wfile.write(resp)
else:
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
resp = '{"Anomaly":"Error","Remarks":"'+str(Int)+'"}'
resp=resp+"\\n"
resp=resp.encode()
self.wfile.write(resp)
elif None != re.search('/AION/pattern_anomaly_settings', self.path):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
if ctype == 'application/json':
length = int(self.headers.get('content-length'))
data = self.rfile.read(length)
#print(data)
#keyList = list(data.keys())
#print(keyList[0])
model = self.path.split('/')[-1]
#print(model)
data = json.loads(data)
#dataStr = json.dumps(data)
groupswitching = data['groupswitching']
transitionprobability = data['transitionprobability']
transitionsequence = data['transitionsequence']
sequencethreshold = data['sequencethreshold']
home = expanduser("~")
if platform.system() == 'Windows':
filename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'clickstream.json')
else:
filename = os.path.join(home,'HCLT','AION','target',model,'clickstream.json')
#print(filename)
data = {}
data['groupswitching'] = groupswitching
data['transitionprobability'] = transitionprobability
data['transitionsequence'] = transitionsequence
data['sequencethreshold'] = sequencethreshold
updatedConfig = json.dumps(data)
with open(filename, "w") as fpWrite:
fpWrite.write(updatedConfig)
fpWrite.close()
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
resp = '{"Status":"SUCCESS"}'
resp=resp+"\\n"
resp=resp.encode()
self.wfile.write(resp)
else:
print("python ==> else2")
data = {}
else:
print("python ==> else1")
self.send_response(403)
self.send_header('Content-Type', 'application/json')
self.end_headers()
print("PYTHON ######## REQUEST ####### ENDED")
return
def do_GET(self):
print("PYTHON ######## REQUEST ####### STARTED")
if None != re.search('/AION/predict', self.path):
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
features = featureslist.getfeatures()
displaymsg = 'Data in JSON Format'
config = json.loads(features)
features = config['features']
datajson={}
for feature in features:
if feature['Type'].lower() != 'target':
datajson[feature['feature']] = 'Value'
displaymsg = json.dumps(datajson)
msg="""
URL:{url}
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
""".format(url=self.path,displaymsg=displaymsg)
self.wfile.write(msg.encode())
elif None != re.search('/AION/monitoring', self.path):
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
displaymsg='{"trainingDataLocation":"Reference Data File Path","currentDataLocation":"Latest Data File Path"}'
msg="""
URL:{url}
RequestType: POST
Content-Type=application/json
Body: {displaymsg}""".format(url=self.path,displaymsg=displaymsg)
self.wfile.write(msg.encode())
elif None != re.search('/AION/features', self.path):
outputStr = featureslist.getfeatures()
outputStr = outputStr.strip()
resp = outputStr
resp=resp+"\\n"
resp=resp.encode()
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(resp)
else:
msg="""
URL for prediction: /AION/predict
URL for features List: /AION/features
URL for monitoring: /AION/monitoring
URL for performance: /AION/performance"""
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(msg.encode())
return
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
allow_reuse_address = True
def shutdown(self):
self.socket.close()
HTTPServer.shutdown(self)
class SimpleHttpServer():
def __init__(self, ip, port):
self.server = ThreadedHTTPServer((ip,port), HTTPRequestHandler)
def start(self):
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
def waitForThread(self):
self.server_thread.join()
def stop(self):
self.server.shutdown()
self.waitForThread()
if __name__=='__main__':
parser = argparse.ArgumentParser(description='HTTP Server')
parser.add_argument('port', type=int, help='Listening port for HTTP Server')
parser.add_argument('ip', help='HTTP Server IP')
args = parser.parse_args()
server = SimpleHttpServer(args.ip, args.port)
#delete file
#create file
#write just msg as "started"
print('HTTP Server Running...........')
#file close
server.start()
server.waitForThread()
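# Example invocation (the script name is illustrative; positional arguments are port, then ip):
#   python aion_service.py 8080 0.0.0.0
# POST endpoints handled above: /AION/predict, /AION/features, /AION/monitoring, /AION/performance,
# /AION/pattern_anomaly_predict/<model> and /AION/pattern_anomaly_settings/<model>;
# the matching GET requests return short usage hints.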
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> from script.inputprofiler import inputprofiler
def preprocessing(data):
profilerobj = inputprofiler()
data = profilerobj.run(data)
data = data.astype(np.float64)
return(data)
import matplotlib.pyplot as plt
try:
from sklearn.externals import joblib
except:
import joblib
import os,sys
import pandas as pd
from alibi.explainers import ALE,plot_ale
import io
import json
import urllib, base64
import numpy as np
from scipy.stats import linregress
from statistics import mean
def get_ranked_values(explanation):
ranked_shap_vals = []
for cls_idx in range(len(explanation.shap_values)):
this_ranking = (
explanation.raw['importances'][str(cls_idx)]['ranked_effect'],
explanation.raw['importances'][str(cls_idx)]['names']
)
ranked_shap_vals.append(this_ranking)
return ranked_shap_vals
def feature_importance_using_shap(model,X,featuresNames,classes,x_test,x_test_waterfall):
from alibi.explainers import KernelShap
import shap
shap.initjs()
if hasattr(model, "decision_function"):
pred_fcn = model.decision_function
elif hasattr(model, "predict_proba"):
pred_fcn = model.predict_proba
else:
pred_fcn = model.predict
try:
svm_explainer = KernelShap(pred_fcn,feature_names=featuresNames)
xtest = x_test[0].reshape(1, -1)
svm_explainer.fit(X,n_background_samples=100)
svm_explanation = svm_explainer.explain(xtest)
try:
idx = 0
instance = x_test[0][None, :]
pred = model.predict(instance)
class_idx = pred.item()
if isinstance(svm_explainer.expected_value,np.ndarray):
forceplot = shap.force_plot(svm_explainer.expected_value[class_idx],svm_explanation.shap_values[class_idx][idx,:],instance,feature_names=featuresNames,matplotlib=True,show=False)
else:
forceplot = shap.force_plot(svm_explainer.expected_value,svm_explanation.shap_values[0][idx,:],instance,feature_names=featuresNames,matplotlib=True,show=False)
plt.tight_layout(pad = 0)
image = io.BytesIO()
plt.savefig(image, format='png')
image.seek(0)
string = base64.b64encode(image.read())
image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
except Exception as inst:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
image_64 = ''
try:
plt.clf()
if isinstance(svm_explainer.expected_value,np.ndarray):
r = shap.multioutput_decision_plot(svm_explainer.expected_value.tolist(),
svm_explanation.shap_values,
idx,
feature_names=featuresNames,
feature_order='importance',
highlight=[class_idx],
legend_labels=classes,
return_objects=True,
legend_location='lower right',show=False)
else:
expectedvalues = [svm_explainer.expected_value]
r = shap.multioutput_decision_plot(expectedvalues,
svm_explanation.shap_values,
idx,
feature_names=featuresNames,
highlight = [0],
return_objects=True,
legend_labels=['Value'],
feature_order='importance',
show=False)
plt.tight_layout(pad = 0)
image = io.BytesIO()
plt.savefig(image, format='png')
image.seek(0)
string = base64.b64encode(image.read())
image2_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
except Exception as inst:
print(inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
image2_64 = ''
except Exception as inst:
print(inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
image2_64 = ''
image_64 = ''
try:
plt.clf()
x_test_waterfall = x_test_waterfall[featuresNames]
explainer = shap.Explainer(model.predict, x_test_waterfall, feature_names=featuresNames)
shap_values = explainer(x_test)
r = shap.plots.waterfall(shap_values[0], show=False)
image = io.BytesIO()
plt.savefig(image, format='png', bbox_inches='tight')
image.seek(0)
string = base64.b64encode(image.read())
image3_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
except Exception as inst:
print(inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
image3_64 = ''
return (image_64, image2_64, image3_64)
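# The three base64-encoded images returned above are, in order: a SHAP force plot for the first
# test record, a multi-output decision plot, and a waterfall plot; an empty string is returned
# for any plot that failed to render.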
def feature_importance(xtrain,ytrain,xfeatures,yfeature,problemType):
if problemType == 'classification':
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import ExtraTreesClassifier
selector = SelectFromModel(ExtraTreesClassifier())
selector.fit(xtrain,ytrain)
values = selector.estimator_.feature_importances_
elif problemType == 'regression':
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import Lasso
selector = SelectFromModel(Lasso())
selector.fit(xtrain,ytrain)
values = np.abs(selector.estimator_.coef_)
labels = xtrain.columns.tolist()
dft = pd.DataFrame()
dft['labels'] = labels
dft['values'] = values
maxrecord = dft.iloc[dft['values'].argmax()]
mostimportantfeature = maxrecord['labels']
f_imp = dft.to_json(orient='records')
return(f_imp,mostimportantfeature)
def get_trust_score(prdictfn,proba_fun,X_train,y_train):
from alibi.confidence import TrustScore
ts = TrustScore(k_filter=10,alpha=.05,filter_type='distance_knn',leaf_size=40,metric='euclidean',dist_filter_type='point')
ts.fit(X_train, y_train, classes=3)
y_pred = prdictfn(X_train)
#y_prod = proba_fun(X_train)
#probas = y_prod[range(len(y_pred)), y_pred]
score, closest_class = ts.score(X_train, y_pred,k=2,dist_type='point')
return(mean(score))
def getCounterFactuals(model,prdictfn,features,x_train,categories):
from alibi.explainers import CounterFactualProto
cat_vars_ord = {}
categoryList=categories.keys().tolist()
categoryCountList=categories.tolist()
for i in range(0,len(categoryCountList)):
cat_vars_ord[categoryList[i]] = categoryCountList[i]
print(cat_vars_ord)
X = x_train[0].reshape((1,) + x_train[0].shape)
shape = X.shape
print(shape)
beta = .01
c_init = 1.
c_steps = 5
max_iterations = 500
rng = (-1., 1.) # scale features between -1 and 1
feature_range = (x_train.min(axis=0), x_train.max(axis=0))
cf = CounterFactualProto(prdictfn,shape,cat_vars=cat_vars_ord)
explanation = cf.explain(X)
print(explanation)
def getAnchorTabularofFirstRecord(predict_fn,features,X_train,X_test,labelMap, class_percent=None):
threshold = 0.95
from alibi.explainers import AnchorTabular
explainer = AnchorTabular(predict_fn, features)
explainer.fit(X_train.values)
X_test = X_test.values
anchors = []
for idx in range(len(X_test)):
prediction = explainer.predictor(X_test[idx].reshape(1, -1))[0]
if len(labelMap) > 0:
predictionstr = list(labelMap.keys())[list(labelMap.values()).index(prediction)]
else:
predictionstr = prediction
explanation = explainer.explain(X_test[idx],threshold=threshold)
if str(explanation.anchor) == '[]':
if class_percent and class_percent.get(prediction, 0.0) > threshold:
anchor = f"Cannot explain the prediction of this class ({predictionstr}) since there is no salient subset of features that is necessary for the prediction to hold. This behaviour is typical when the data is very imbalanced and is seen for the majority class in a classification problem."
else:
anchor = f'Can not get the explanation for {predictionstr}.'
precision = explanation.precision[0]
else:
anchor = '%s' % (' AND '.join(explanation.anchor))
precision = explanation.precision
coverage = explanation.coverage
anchorjson = {}
anchorjson['features'] = eval(str(features))
anchorjson['values'] = eval(str(list(X_test[idx])))
anchorjson['prediction'] = str(predictionstr)
anchorjson['precision'] = str(round(precision,2))
anchorjson['anchor'] = anchor
anchors.append(anchorjson)
print(anchors)
try:
return(json.dumps(anchors))
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
return(json.dumps({}))
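# Each entry of the returned JSON list pairs a test record with its predicted label, the anchor
# rule (an AND-joined set of feature conditions that is sufficient for the prediction to hold),
# and the precision of that rule; an empty JSON object is returned if serialization fails.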
def ale_analysis():
displaypath = os.path.join(os.path.dirname(os.path.abspath(__file__)),"etc","display.json")
with open(displaypath) as file:
config = json.load(file)
file.close()
model = joblib.load(os.path.join(os.path.dirname(os.path.abspath(__file__)),"model",config['saved_model']))
predict_fn = lambda x: model.predict(x)
predictproba_fn = lambda x: model.predict_proba(x)
dathPath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'data','postprocesseddata.csv.gz')
dataFrame=pd.read_csv(dathPath,compression='gzip')
#dataFrame = pd.read_csv(dathPath)
testdathPath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'data','predicteddata.csv.gz')
testdataFrame=pd.read_csv(testdathPath,compression='gzip')
#testdataFrame = pd.read_csv(testdathPath)
features = config['modelFeatures']#['SepalWidthCm','PetalLengthCm']
targetfeature = config['targetFeature']#'Species'
labelMap = config['labelMaps']
targetData = dataFrame[targetfeature]
if config['problemType'].lower() == 'regression':
X_train = dataFrame[features]
X_test = testdataFrame.head(5)
X_test = X_test[features]
else:
valueCount=targetData.value_counts()
class_percent = (valueCount/ len(targetData)).to_dict()
categoryList=valueCount.keys().tolist()
class_names = categoryList
X_train = dataFrame[features]
X_test = testdataFrame.groupby('predict').first().reset_index()
X_test = X_test[features]
f_imp,m_imp_f = feature_importance(X_train,targetData,features,targetfeature,config['problemType'].lower())
if hasattr(model, "decision_function"):
logit_fun_lr = model.decision_function
try:
logit_ale_lr = ALE(logit_fun_lr, feature_names=features, target_names=categoryList)
logit_exp_lr = logit_ale_lr.explain(X_train.values)
values = logit_exp_lr.ale_values
feature = logit_exp_lr.feature_names
feature_values = logit_exp_lr.feature_values
lines= []
sentences = []
for x in range(0,len(feature)):
f_value = feature_values[x]
value = values[x]
lines= []
for y in range(0,len(class_names)):
line = []
for z in value:
cordinate = z[y]
line.append(cordinate)
lines.append(line)
line = lines[0]
slope1, intercept1, r_value, p_value, std_err = linregress(f_value,line)
line = lines[1]
slope2, intercept2, r_value, p_value, std_err = linregress(f_value,line)
xi = (intercept1-intercept2) / (slope2-slope1)
xi = round(xi,2)
lastvalues = {}
i = 0
for line in lines:
value = line[len(line)-1]
lastvalues[class_names[i]] = value
i = i+1
Keymax = max(lastvalues, key=lastvalues.get)
Keymin = min(lastvalues, key=lastvalues.get)
Keymaxclass = list(labelMap.keys())[list(labelMap.values()).index(Keymax)]
Keyminclass = list(labelMap.keys())[list(labelMap.values()).index(Keymin)]
sentense = '<b>Effect of '+str(feature[x])+'</b><br>For data samples having <b>'+str(feature[x])+'</b> >= <b>~'+str(xi)+'</b>, there is a very high chance that they are of class <b>'+str(Keymaxclass)+'</b> '+targetfeature+'. For data samples having <b>'+str(feature[x])+'</b> < <b>~'+str(xi)+'</b>, there is a very high chance that they are of class <b>'+str(Keyminclass)+'</b> '+targetfeature+'.'
sentences.append(sentense)
except:
sentense = ''
sentences.append(sentense)
xi = 0
elif hasattr(model, "predict_proba"):
logit_fun_lr = model.predict_proba
logit_ale_lr = ALE(logit_fun_lr, feature_names=features, target_names=categoryList)
logit_exp_lr = logit_ale_lr.explain(X_train.values)
values = logit_exp_lr.ale_values
feature = logit_exp_lr.feature_names
feature_values = logit_exp_lr.feature_values
lines= []
sentences = []
sentense = 'The graphs show, for each feature value, how much more (or less) probability the model assigns to each class relative to the mean prediction. Any increase in the relative probability of one class must therefore be offset by a decrease in the probability of another class.'
sentences.append(sentense)
xi = 0
elif hasattr(model, "predict"):
logit_fun_lr = model.predict
logit_ale_lr = ALE(logit_fun_lr, feature_names=features, target_names=['Value'])
logit_exp_lr = logit_ale_lr.explain(X_train.values)
values = logit_exp_lr.ale_values
feature = logit_exp_lr.feature_names
feature_values = logit_exp_lr.feature_values
lines= []
sentences = []
sentense = 'The ALE value at a given feature value is the difference from the mean effect of that feature. Put differently, the ALE value is the relative effect of the feature on the prediction at that feature value.'
sentences.append(sentense)
xi = 0
if (len(features)%2 ==0):
n_cols = int(len(features)/2)
else:
n_cols = int(len(features)/2)+1
figheight = n_cols*3
try:
plot_ale(logit_exp_lr,n_cols=2, fig_kw={'figwidth': 8, 'figheight': figheight})
plt.tight_layout(pad = 0)
image = io.BytesIO()
plt.savefig(image, format='png')
image.seek(0)
string = base64.b64encode(image.read())
image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
except:
image_64 = ''
#score = get_trust_score(model.predict,proba_fun_lr,X_train.values,targetData.values)
if config['problemType'].lower() == 'classification':
anchorjson = getAnchorTabularofFirstRecord(predict_fn,features,X_train,X_test,labelMap,class_percent)
else:
anchorjson = getAnchorTabularofFirstRecord(predict_fn,features,X_train,X_test,labelMap)
#anchors=[]
#anchorjson = json.dumps(anchors)
#feature_importance_using_shap(model,X_train.values,features,class_names)
#getCounterFactuals(model,predictproba_fn,features,X_train.values,valueCount)
output_json = {"status":"SUCCESS","data":{"data":image_64,"most_influencedfeature":m_imp_f,"interceptionpoint":xi,"sentences":sentences,"feature_importance":json.loads(f_imp),"anchorjson":json.loads(anchorjson)}}
output_json = json.dumps(output_json)
print("aion_ai_explanation:",output_json)
return(output_json)
def local_analysis(jsonData):
jsonData = json.loads(jsonData)
displaypath = os.path.join(os.path.dirname(os.path.abspath(__file__)),"etc","display.json")
with open(displaypath) as file:
config = json.load(file)
file.close()
model = joblib.load(os.path.join(os.path.dirname(os.path.abspath(__file__)),"model",config['saved_model']))
predict_fn = lambda x: model.predict(x)
dathPath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'data','postprocesseddata.csv.gz')
dataFrame=pd.read_csv(dathPath,compression='gzip')
testdathPath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'predicteddata.csv.gz')
testdataFrame = pd.read_csv(testdathPath, compression='gzip')
features = config['modelFeatures']#['SepalWidthCm','PetalLengthCm']
targetfeature = config['targetFeature']#'Species'
targetData = dataFrame[targetfeature]
valueCount=targetData.value_counts()
categoryList=valueCount.keys().tolist()
class_names = categoryList
#class_names = class_names.sort()
X_train = dataFrame[features]
from pandas import json_normalize
df_test = json_normalize(jsonData)
df_test = preprocessing(df_test)
df_test = df_test[features]
from alibi.explainers import AnchorTabular
explainer = AnchorTabular(predict_fn, features)
explainer.fit(X_train.values)
df_test = df_test.values
prediction = explainer.predictor(df_test.reshape(1, -1))[0]
labelMap = config['labelMaps']
if len(labelMap) > 0:
prediction = list(labelMap.keys())[list(labelMap.values()).index(prediction)]
else:
prediction = str(prediction)
try:
explanation = explainer.explain(df_test,threshold=0.85)
if str(explanation.anchor) == '[]':
anchor = 'NA'
precision = str(round(explanation.precision[0],2))
else:
anchor = '%s' % (' AND '.join(explanation.anchor))
precision = str(round(explanation.precision,2))
coverage = explanation.coverage
except Exception as e:
print(e)
anchor = 'NA'
precision = 0
coverage = 0
df_test_waterfall = testdataFrame
forceplot,multidecisionplot,waterfallplot = feature_importance_using_shap(model,X_train.head(300).values,features,class_names,df_test,df_test_waterfall)
output_json = {"status":"SUCCESS","data":{"anchor":anchor,"precision":precision,"coverage":coverage,"prediction":prediction,"forceplot":forceplot,"multidecisionplot":multidecisionplot,"waterfallplot":waterfallplot}}
#print(output_json)
output_json = json.dumps(output_json)
print("aion_ai_explanation:",output_json)
return(output_json)
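# Command-line usage (as wired up below): pass 'global' for the ALE/anchor analysis of the
# training data, or 'local' plus a JSON record for a single-prediction explanation, e.g.
#   python aion_xai.py local '{"feature1": 1.0, "feature2": 2.0}'
# (the script name and the feature names above are placeholders).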
if __name__ == '__main__':
analysis_type = sys.argv[1]
if analysis_type.lower() == 'global':
ale_analysis()
if analysis_type.lower() == 'local':
data = sys.argv[2]
local_analysis(data)<s> from script.inputprofiler import inputprofiler
def preprocessing(data):
profilerobj = inputprofiler()
data = profilerobj.run(data)
data = data.astype(np.float64)
return(data)
import matplotlib.pyplot as plt
try:
from sklearn.externals import joblib
except:
import joblib
import os,sys
import pandas as pd
from alibi.explainers import ALE,plot_ale
import io
import json
import urllib, base64
import numpy as np
from scipy.stats import linregress
from statistics import mean
from tensorflow.keras.models import load_model
from tensorflow.keras import backend as K
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
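# Eager execution is disabled up front; this appears to be required so that shap.DeepExplainer,
# used below for the waterfall plot, can compute its TF1-style graph gradients against the
# loaded Keras model.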
def recall_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1_m(y_true, y_pred):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
def rmse_m(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
def r_square(y_true, y_pred):
SS_res = K.sum(K.square(y_true-y_pred))
SS_tot = K.sum(K.square(y_true-K.mean(y_true)))
return (1 - SS_res/(SS_tot+K.epsilon()))
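# The metric functions above are passed as custom_objects to load_model below (selected by
# scoreParam), so the saved Keras model can be deserialized and re-compiled with the matching
# custom metric.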
def get_ranked_values(explanation):
ranked_shap_vals = []
for cls_idx in range(len(explanation.shap_values)):
this_ranking = (
explanation.raw['importances'][str(cls_idx)]['ranked_effect'],
explanation.raw['importances'][str(cls_idx)]['names']
)
ranked_shap_vals.append(this_ranking)
return ranked_shap_vals
def feature_importance_using_shap(model,X,featuresNames,classes,x_test,problemType,modelname,x_test_waterfall):
from alibi.explainers import KernelShap
import shap
shap.initjs()
if hasattr(model, "decision_function") and problemType.lower() == 'classification':
pred_fcn = model.decision_function
elif hasattr(model, "predict_proba") and problemType.lower() == 'classification':
pred_fcn = lambda x: model.predict_proba(np.expand_dims(x, axis=2))
else:
if modelname == 'Neural Network':
pred_fcn = lambda x: model.predict(x)
else:
pred_fcn = lambda x: model.predict(np.expand_dims(x, axis=2))
svm_explainer = KernelShap(pred_fcn,feature_names=featuresNames)
xtest = x_test[0].reshape(1, -1)
svm_explainer.fit(X,n_background_samples=100)
svm_explanation = svm_explainer.explain(xtest)
try:
idx = 0
instance = x_test[0][None, :]
if problemType.lower() == 'classification':
if modelname == 'Neural Network':
instance = x_test
else:
instance = np.expand_dims(x_test, axis=2)
pred = np.argmax(model.predict(instance),axis=1)
class_idx = pred.item()
else:
instance = np.expand_dims(x_test, axis=2)
pred = model.predict(instance)
class_idx = 0
if isinstance(svm_explainer.expected_value,np.ndarray):
forceplot = shap.force_plot(svm_explainer.expected_value[class_idx],svm_explanation.shap_values[class_idx][idx,:],instance,feature_names=featuresNames,matplotlib=True,show=False)
else:
forceplot = shap.force_plot(svm_explainer.expected_value,svm_explanation.shap_values[0][idx,:],instance,feature_names=featuresNames,matplotlib=True,show=False)
plt.tight_layout(pad = 0)
image = io.BytesIO()
plt.savefig(image, format='png')
image.seek(0)
string = base64.b64encode(image.read())
image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
except Exception as inst:
print(inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
image_64 = ''
try:
plt.clf()
if isinstance(svm_explainer.expected_value,np.ndarray):
r = shap.multioutput_decision_plot(svm_explainer.expected_value.tolist(),
svm_explanation.shap_values,
idx,
feature_names=featuresNames,
feature_order='importance',
highlight=[class_idx],
legend_labels=classes,
return_objects=True,
legend_location='lower right',show=False)
else:
expectedvalues = [svm_explainer.expected_value]
r = shap.multioutput_decision_plot(expectedvalues,
svm_explanation.shap_values,
idx,
feature_names=featuresNames,
highlight = [0],
return_objects=True,
legend_labels=['Value'],
feature_order='importance',
show=False)
plt.tight_layout(pad = 0)
image = io.BytesIO()
plt.savefig(image, format='png')
image.seek(0)
string = base64.b64encode(image.read())
image2_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
except Exception as inst:
print(inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
image2_64 = ''
try:
plt.clf()
explainer = shap.DeepExplainer(model, X)
shap_values = explainer.shap_values(x_test_waterfall.values)
exp = shap.plots._waterfall.waterfall_legacy(explainer.expected_value[0].numpy(), shap_values[0][0],feature_names=featuresNames,show=False)
image = io.BytesIO()
plt.savefig(image, format='png', bbox_inches='tight')
image.seek(0)
string = base64.b64encode(image.read())
image3_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
except Exception as inst:
print(inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
image3_64 = ''
return(image_64,image2_64,image3_64)
def feature_importance(xtrain,ytrain,xfeatures,yfeature,problemType):
if problemType == 'classification':
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import ExtraTreesClassifier
selector = SelectFromModel(ExtraTreesClassifier())
selector.fit(xtrain,ytrain)
values = selector.estimator_.feature_importances_
elif problemType == 'regression':
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import Lasso
selector = SelectFromModel(Lasso())
selector.fit(xtrain,ytrain)
values = np.abs(selector.estimator_.coef_)
labels = xtrain.columns.tolist()
dft = pd.DataFrame()
dft['labels'] = labels
dft['values'] = values
maxrecord = dft.iloc[dft['values'].argmax()]
mostimportantfeature = maxrecord['labels']
f_imp = dft.to_json(orient='records')
return(f_imp,mostimportantfeature)
def get_trust_score(prdictfn,proba_fun,X_train,y_train):
from alibi.confidence import TrustScore
ts = TrustScore(k_filter=10,alpha=.05,filter_type='distance_knn',leaf_size=40,metric='euclidean',dist_filter_type='point')
ts.fit(X_train, y_train, classes=3)
y_pred = prdictfn(X_train)
#y_prod = proba_fun(X_train)
#probas = y_prod[range(len(y_pred)), y_pred]
score, closest_class = ts.score(X_train, y_pred,k=2,dist_type='point')
return(mean(score))
def getCounterFactuals(model,prdictfn,features,x_train,categories):
from alibi.explainers import CounterFactualProto
cat_vars_ord = {}
categoryList=categories.keys().tolist()
categoryCountList=categories.tolist()
for i in range(0,len(categoryCountList)):
cat_vars_ord[categoryList[i]] = categoryCountList[i]
print(cat_vars_ord)
X = x_train[0].reshape((1,) + x_train[0].shape)
shape = X.shape
print(shape)
beta = .01
c_init = 1.
c_steps = 5
max_iterations = 500
rng = (-1., 1.) # scale features between -1 and 1
feature_range = (x_train.min(axis=0), x_train.max(axis=0))
cf = CounterFactualProto(prdictfn,shape,cat_vars=cat_vars_ord)
explanation = cf.explain(X)
print(explanation)
def getAnchorTabularofFirstRecord(predict_fn,features,X_train,X_test,labelMap, class_percent=None):
threshold = 0.95
from alibi.explainers import AnchorTabular
explainer = AnchorTabular(predict_fn, features)
explainer.fit(X_train.values)
X_test = X_test.values
anchors = []
for idx in range(len(X_test)):
prediction = explainer.predictor(X_test[idx].reshape(1, -1))[0]
if isinstance(prediction,np.ndarray):
prediction = prediction[0]
if len(labelMap) > 0:
predictionstr = list(labelMap.keys())[list(labelMap.values()).index(prediction)]
else:
predictionstr = str(prediction)
try:
explanation = explainer.explain(X_test[idx],threshold=threshold)
if str(explanation.anchor) == '[]':
if class_percent and class_percent.get(prediction, 0.0) > threshold:
anchor = f"Cannot explain the prediction of this class ({predictionstr}) since there is no salient subset of features that is necessary for the prediction to hold. This behaviour is typical when the data is very imbalanced and is seen for the majority class in a classification problem."
else:
anchor = f'Cannot get the explanation for {predictionstr}.'
precision = explanation.precision[0]
else:
anchor = '%s' % (' AND '.join(explanation.anchor))
precision = explanation.precision
coverage = explanation.coverage
except Exception as inst:
print(inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
anchor = 'Reason Not found'
precision = 0
anchorjson = {}
anchorjson['features'] = eval(str(features))
anchorjson['values'] = eval(str(list(X_test[idx])))
anchorjson['prediction'] = predictionstr
anchorjson['precision'] = precision
anchorjson['anchor'] = anchor
anchors.append(anchorjson)
return(json.dumps(anchors))
def ale_analysis():
displaypath = os.path.join(os.path.dirname(os.path.abspath(__file__)),"etc","display.json")
with open(displaypath) as file:
config = json.load(file)
file.close()
scoreParam = config['scoreParam']
filename_dl = os.path.join(os.path.dirname(os.path.abspath(__file__)),"model",config['saved_model'])
if(scoreParam.lower() == 'rmse'):
model = load_model(filename_dl,custom_objects={"rmse": rmse_m},compile=False)
model.compile(loss='binary_crossentropy',optimizer='Nadam', metrics=[rmse_m])
elif(scoreParam.lower() == 'r2'):
model = load_model(filename_dl,custom_objects={"r2": r_square},compile=False)
model.compile(loss='binary_crossentropy',optimizer='Nadam', metrics=[r_square])
elif(scoreParam.lower() == 'recall'):
model = load_model(filename_dl,custom_objects={"recall": recall_m},compile=False)
model.compile(loss='binary_crossentropy',optimizer='Nadam', metrics=[recall_m])
elif(scoreParam.lower() == 'precision'):
model = load_model(filename_dl,custom_objects={"precision": precision_m},compile=False)
model.compile(loss='binary_crossentropy',optimizer='Nadam', metrics=[precision_m])
elif(scoreParam.lower() == 'roc_auc'):
model = load_model(filename_dl,compile=False)
model.compile(loss='binary_crossentropy',optimizer='Nadam', metrics=[tf.keras.metrics.AUC()])
elif(scoreParam.lower() == 'f1_score'):
model = load_model(filename_dl,custom_objects={"f1_score": f1_m},compile=False)
model.compile(loss='binary_crossentropy',optimizer='Nadam', metrics=[f1_m])
else:
model = load_model(filename_dl)
if config['modelname'] == 'Neural Network':
predict_fn = lambda x: model.predict(x)
else:
predict_fn = lambda x: model.predict(np.expand_dims(x, axis=2))
predictproba_fn = lambda x: model.predict_proba(np.expand_dims(x, axis=2))
dathPath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'data','postprocesseddata.csv.gz')
dataFrame=pd.read_csv(dathPath,compression='gzip')
testdathPath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'data','predicteddata.csv.gz')
testdataFrame=pd.read_csv(testdathPath,compression='gzip')
features = config['modelFeatures']#['SepalWidthCm','PetalLengthCm']
targetfeature = config['targetFeature']#'Species'
labelMap = config['labelMaps']
targetData = dataFrame[targetfeature]
if config['problemType'].lower() == 'regression':
X_train = dataFrame[features]
X_test = testdataFrame.head(5)
X_test = X_test[features]
else:
valueCount=targetData.value_counts()
class_percent = (valueCount/ len(targetData)).to_dict()
categoryList=valueCount.keys().tolist()
class_names = categoryList
X_train = dataFrame[features]
X_test = testdataFrame.groupby('predict').first().reset_index()
X_test = X_test[features]
f_imp,m_imp_f = feature_importance(X_train,targetData,features,targetfeature,config['problemType'].lower())
if hasattr(model, "decision_function") and config['problemType'].lower() == 'classification':
logit_fun_lr = model.decision_function
try:
logit_ale_lr = ALE(logit_fun_lr, feature_names=features, target_names=categoryList)
logit_exp_lr = logit_ale_lr.explain(X_train.values)
values = logit_exp_lr.ale_values
feature = logit_exp_lr.feature_names
feature_values = logit_exp_lr.feature_values
lines= []
sentences = []
for x in range(0,len(feature)):
f_value = feature_values[x]
value = values[x]
lines= []
for y in range(0,len(class_names)):
line = []
for z in value:
cordinate = z[y]
line.append(cordinate)
lines.append(line)
line = lines[0]
slope1, intercept1, r_value, p_value, std_err = linregress(f_value,line)
line = lines[1]
slope2, intercept2, r_value, p_value, std_err = linregress(f_value,line)
xi = (intercept1-intercept2) / (slope2-slope1)
xi = round(xi,2)
lastvalues = {}
i = 0
for line in lines:
value = line[len(line)-1]
lastvalues[class_names[i]] = value
i = i+1
Keymax = max(lastvalues, key=lastvalues.get)
Keymin = min(lastvalues, key=lastvalues.get)
Keymaxclass = list(labelMap.keys())[list(labelMap.values()).index(Keymax)]
Keyminclass = list(labelMap.keys())[list(labelMap.values()).index(Keymin)]
sentense = '<b>Effect of '+str(feature[x])+'</b><br>For data samples having <b>'+str(feature[x])+'</b> >= <b>~'+str(xi)+'</b>, there is a very high chance that they are of class <b>'+str(Keymaxclass)+'</b> '+targetfeature+'. For data samples having <b>'+str(feature[x])+'</b> < <b>~'+str(xi)+'</b>, there is a very high chance that they are of class <b>'+str(Keyminclass)+'</b> '+targetfeature+'.'
sentences.append(sentense)
except:
sentense = ''
sentences.append(sentense)
xi = 0
elif hasattr(model, "predict_proba") and config['problemType'].lower() == 'classification':
logit_fun_lr = lambda x: model.predict_proba(np.expand_dims(x, axis=2))
logit_ale_lr = ALE(logit_fun_lr, feature_names=features, target_names=categoryList)
print(model.__class__)
try:
logit_exp_lr = logit_ale_lr.explain(X_train.values)
except:
X = np.expand_dims(X_train, axis=2)
logit_exp_lr = logit_ale_lr.explain(X)
values = logit_exp_lr.ale_values
feature = logit_exp_lr.feature_names
feature_values = logit_exp_lr.feature_values
lines= []
sentences = []
sentense = 'The graphs show, for each feature value, how much more (or less) probability the model assigns to each class relative to the mean prediction. Any increase in the relative probability of one class must therefore be offset by a decrease in the probability of another class.'
sentences.append(sentense)
xi = 0
elif hasattr(model, "predict"):
try:
if config['modelname'] == 'Neural Network':
logit_fun_lr = lambda x: model.predict(x)
else:
logit_fun_lr = lambda x: model.predict(np.expand_dims(x, axis=2))
logit_ale_lr = ALE(logit_fun_lr, feature_names=features, target_names=['Value'])
logit_exp_lr = logit_ale_lr.explain(X_train.values)
values = logit_exp_lr.ale_values
feature = logit_exp_lr.feature_names
feature_values = logit_exp_lr.feature_values
lines= []
sentences = []
sentense = 'The ALE value at a given feature value is the difference from the mean effect of that feature. Put differently, the ALE value is the relative effect of the feature on the prediction at that feature value.'
sentences.append(sentense)
xi = 0
except:
xi = 0
sentences = []
if (len(features)%2 ==0):
n_cols = int(len(features)/2)
else:
n_cols = int(len(features)/2)+1
figheight = n_cols*3
try:
plot_ale(logit_exp_lr,n_cols=2, fig_kw={'figwidth': 8, 'figheight': figheight})
plt.tight_layout(pad = 0)
image = io.BytesIO()
plt.savefig(image, format='png')
image.seek(0)
string = base64.b64encode(image.read())
image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
except:
image_64 = ''
#score = get_trust_score(model.predict,proba_fun_lr,X_train.values,targetData.values)
if config['problemType'].lower() == 'classification':
anchorjson = getAnchorTabularofFirstRecord(predict_fn,features,X_train,X_test,labelMap,class_percent)
else:
anchorjson = getAnchorTabularofFirstRecord(predict_fn,features,X_train,X_test,labelMap)
#anchors=[]
#anchorjson = json.dumps(anchors)
#feature_importance_using_shap(model,X_train.values,features,class_names)
#getCounterFactuals(model,predictproba_fn,features,X_train.values,valueCount)
output_json = {"status":"SUCCESS","data":{"data":image_64,"most_influencedfeature":m_imp_f,"interceptionpoint":xi,"sentences":sentences,"feature_importance":json.loads(f_imp),"anchorjson":json.loads(anchorjson)}}
output_json = json.dumps(output_json)
print("aion_ai_explanation:",output_json)
return(output_json)
def local_analysis(jsonData):
jsonData = json.loads(jsonData)
displaypath = os.path.join(os.path.dirname(os.path.abspath(__file__)),"etc","display.json")
with open(displaypath) as file:
config = json.load(file)
file.close()
scoreParam = config['scoreParam']
filename_dl = os.path.join(os.path.dirname(os.path.abspath(__file__)),"model",config['saved_model'])
if(scoreParam.lower() == 'rmse'):
model = load_model(filename_dl,custom_objects={"rmse": rmse_m},compile=False)
model.compile(loss='binary_crossentropy',optimizer='Nadam', metrics=[rmse_m])
elif(scoreParam.lower() == 'r2'):
model = load_model(filename_dl,custom_objects={"r2": r_square},compile=False)
model.compile(loss='binary_crossentropy',optimizer='Nadam', metrics=[r_square])
elif(scoreParam.lower() == 'recall'):
model = load_model(filename_dl,custom_objects={"recall": recall_m},compile=False)
model.compile(loss='binary_crossentropy',optimizer='Nadam', metrics=[recall_m])
elif(scoreParam.lower() == 'precision'):
model = load_model(filename_dl,custom_objects={"precision": precision_m},compile=False)
processed "+str(self.num_records)+".-- Loss: "+str(loss)+". -- accuracy: "+str(accuracy))
logger.info("FL Client model intercept: "+str(model.intercept_))
logger.info("FL Client model coefficients: "+str(model.coef_))
self.model_save(self.model)
return loss, len(self.X_test), {"accuracy": accuracy}
elif (self.problem_type.lower() == 'regression'):
if (self.model_name.lower() == 'linearregression' ):
# loss = log_loss(self.y_test, self.model.predict(self.X_test))
mse=mean_squared_error(self.y_test, self.model.predict(self.X_test))
rmse = np.sqrt(mean_squared_error(self.y_test, self.model.predict(self.X_test)))
mae = mean_absolute_error(self.y_test, self.model.predict(self.X_test))
r2=r2_score(self.y_test, self.model.predict(self.X_test))
loss = rmse
results = {
"mean_absolute_error": mae,
"mean_squared_error": mse,
"root_mean_squared_error": rmse,
"r2":r2,
}
print(f"{self.client_id} Sending weights -- data processed {self.num_records}, -- Loss: {(rmse)}. -- metrics: {results}. ")
logger.info(str(self.client_id)+" Sending weights -- data processed "+str(self.num_records)+".-- Loss: "+str(rmse)+". -- metrics: "+str(results))
logger.info("FL Client model intercept: "+str(self.model.intercept_))
logger.info("FL Client model coefficients: "+str(self.model.coef_))
self.model_save(self.model)
return loss, len(self.X_test), results
""" The below aion fl client is for deep learning process.
Why different client for sklearn and deeplearn ?: Because, flower calling the client object and process all functions (get_parameters,fit and evaluate) internally. So, user space we cannot combine both (sklearn n dl) using if..else. """
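# Hedged sketch (mirrors the start_numpy_client calls later in this file; the
# server address shown is only an example): the client is handed to flower, which
# then calls get_parameters(), fit() and evaluate() once per federated round:
#
#   client = aionflc_dl(model, num_rounds, model_name, model_version, wait_time,
#                       client_id, num_records, model_overwrite, problem_type,
#                       X_train_scaled, X_test_scaled, y_train_dl, y_test_dl,
#                       model_params)
#   flower.client.start_numpy_client(server_address='127.0.0.1:8080', client=client)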
class aionflc_dl(flower.client.NumPyClient):
def __init__(self,model,num_rounds,model_name,version,wait_time,client_id,num_records,model_overwrite,problem_type,X_train, X_test, y_train, y_test,model_params):
self.count=0
self.num_rounds=round(num_rounds)
self.model_name=model_name
self.version=version
self.wait_time=int(wait_time)
self.client_id=client_id
self.num_records=num_records
self.model_overwrite=model_overwrite
self.model=model
self.problem_type=problem_type
self.X_train, self.X_test, self.y_train, self.y_test=X_train, X_test, y_train, y_test
self.model_params=model_params
# """ The below part not used now. In future, for our own grpc communication, this module will be used.Call this function where we want. Need to modify aiongrpcproto.proto according our requirement."""
# def callaiongrpcclient(self):
# clientins = aiongrpcclient()
# status=clientins.startgrpcclient()
# return status
#Save the final model
def model_save(self,model):
##Locate standard model dir to save model
cwd = os.path.abspath(os.path.dirname(__file__))
model_location=os.path.join(cwd, 'models')
try:
os.makedirs(model_location)
except FileExistsError as fe:
# here,model_location already exists
pass
model_name=self.model_name
# version=self.version
## Saving model
if (self.model_overwrite.lower() == 'false'):
version=str(self.count)
if (model_name.lower() == "deeplearning"):
file_name=model_name+'_'+self.problem_type+'_'+version+".h5"
saved_model=os.path.normpath(os.path.join(model_location,file_name))
log(INFO, "flclient saved_model path: %s ",str(saved_model))
try:
model.save(saved_model)
return True
except Exception as e:
logger.info("model save error. Err.Msg: "+str(e))
return False
else:
file_name=model_name+'_'+self.problem_type+'_'+version+".sav"
saved_model=os.path.normpath(os.path.join(model_location,file_name))
log(INFO, "flclient saved_model path: %s ",str(saved_model))
try:
with open (saved_model,'wb') as f:
pkl.dump(model,f)
return True
except Exception as e:
logger.info("model save error. Err.Msg: "+str(e))
return False
elif (self.model_overwrite.lower() == 'true'):
version=str(self.version)
if (model_name.lower() == "deeplearning"):
file_name=model_name+'_'+self.problem_type+'_'+version+".h5"
saved_model=os.path.normpath(os.path.join(model_location,file_name))
log(INFO, "flclient saved_model path: %s ",str(saved_model))
try:
model.save(saved_model)
return True
except Exception as e:
logger.info("model save error. Err.Msg: "+str(e))
return False
else:
file_name=model_name+'_'+self.problem_type+'_'+version+".sav"
saved_model=os.path.normpath(os.path.join(model_location,file_name))
log(INFO, "flclient saved_model path: %s ",str(saved_model))
try:
with open (saved_model,'wb') as f:
pkl.dump(model,f)
return True
except Exception as e:
logger.info("model save error. Err.Msg: "+str(e))
return False
else:
##Write own user instruction
pass
def get_parameters(self, config):
"""Get parameters of the local model."""
return self.model.get_weights()
def get_properties(self,model,time_out):
"""Return the current client properties."""
client_info={'client_id':self.client_id}
time_out=100
return client_info,model,time_out
def fit(self, parameters, config):
"""Train parameters on the locally held training set."""
# Update local model parameters
self.model.set_weights(parameters)
num_partitions=(self.num_rounds)
# num_partitions=round(5)
xtrain=np.array_split(self.X_train, num_partitions)[self.count]
ytrain=np.array_split(self.y_train, num_partitions)[self.count]
# y_train = np_utils.to_categorical(y_train, num_classes)
# y_test = np_utils.to_categorical(y_test, num_classes)
# Get hyperparameters for this round
batch_size: int = int(self.model_params["batch_size"])
epochs: int = int(self.model_params["epochs"])
# round: int = config["rnd"]
# self.round_id = round
log(INFO, "===========================")
log(INFO, "Start training model on local client %s round %i",
self.client_id, config['rnd'])
time.sleep(self.wait_time)
self.count+=1
# Train the model using hyperparameters from config
history = self.model.fit(
xtrain,
ytrain,
batch_size,
epochs,
shuffle=False,
# validation_split=0.1,
validation_data=(self.X_test, self.y_test),
verbose=1
)
# Return updated model parameters and results
parameters_prime = self.model.get_weights()
num_examples_train = len(self.X_train)
model_name = self.model_name
problem_type = self.problem_type
if model_name == "deeplearning":
if problem_type == "classification":
acc = self.model.history.history['val_accuracy']
log(INFO, "Validated accuracy at the end of current round of client %s : %.2f %%",
self.client_id, acc[-1]*100)
log(INFO, "Finished training model on local client %s", self.client_id)
results = {
"loss": history.history["loss"][0],
"accuracy": history.history["accuracy"][0],
"val_loss": history.history["val_loss"][0],
"val_accuracy": history.history["val_accuracy"][0],
}
if problem_type == "regression":
mean_absolute_error = history.history['mean_absolute_error'][0]
mean_squared_error = history.history['mean_squared_error'][0]
y_pred = self.model.predict(self.X_test)
from sklearn import metrics
root_mean_squared_error = np.sqrt(metrics.mean_squared_error(self.y_test, y_pred))
log(INFO, "Mean Absolute Error at the end of current round of client %s : %f",
self.client_id, mean_absolute_error)
log(INFO, "Mean Squared Error at the end of current round of client %s : %f",
self.client_id, mean_squared_error)
log(INFO, "Root Mean Squared Error at the end of current round of client %s : %f",
self.client_id, root_mean_squared_error)
log(INFO, "Finished training model on local client %s", self.client_id)
results = {
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"root_mean_squared_error": root_mean_squared_error,
}
return parameters_prime, num_examples_train, results
def evaluate(self, parameters, config):
"""Evaluate parameters on the locally held test set."""
# Update local model with global parameters
self.model.set_weights(parameters)
num_partitions=(self.num_rounds)
# Get config values
# batch_size: int = config["val_batch_size"]
batch_size: int = int(self.model_params["batch_size"])
        steps: int = int(np.ceil(len(self.X_test)/batch_size))
num_examples_test = len(self.X_test)
log(INFO, "Run for only %i steps", steps)
# Evaluate global model parameters on the local test data and return results
model_name = self.model_name
problem_type = self.problem_type
self.model_save(self.model)
if model_name == "deeplearning":
if problem_type == "classification":
loss, accuracy = self.model.evaluate(self.X_test, self.y_test,verbose=0)
log(INFO, "Client %s : Accuracy %.2f %%", self.client_id, accuracy*100)
log(INFO, "Client %s : Loss %.4f ", self.client_id, loss)
return loss, num_examples_test, {"accuracy": accuracy}
if problem_type == "regression":
loss, mean_absolute_error, mean_squared_error = self.model.evaluate(self.X_test, self.y_test,
steps=steps,verbose=1)
                y_pred = self.model.predict(self.X_test)
                from sklearn import metrics
                root_mean_squared_error = np.sqrt(metrics.mean_squared_error(self.y_test, y_pred))
log(INFO, "Client %s : mean_absolute_error %f ",
self.client_id, mean_absolute_error)
log(INFO, "Client %s : mean_squared_error %f ",
self.client_id, mean_squared_error)
log(INFO, "Client %s : root_mean_squared_error %f ",
self.client_id, root_mean_squared_error)
return loss, num_examples_test, {"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"root_mean_squared_error": root_mean_squared_error}
def randclientid(s,c):
    # note: the passed-in charset is ignored; uppercase letters and digits are always used
    c=string.ascii_uppercase + string.digits
    return ''.join(random.choice(c) for x in range(s))
## Loading input data
def dataLoad(jsonfile):
with open(jsonfile, 'r') as file:
data = json.load(file)
server_ip=str(data["server_IP"])
server_port=str(data["server_port"])
model_name=str(data["model_name"])
problem_type=str(data["problem_type"])
data_location=str(data["data_location"])
# deploy_location=str(data["deploy_location"])
model_params=data["model_hyperparams"]
train_size=int(data["train_size"])
model_version=str(data["version"])
selected_feature=data["selected_feature"]
if (type(selected_feature) is str):
selected_feature=selected_feature.split(',')
model_overwrite=data['model_overwrite']
target_feature=data["target_feature"]
num_records=int(data['num_records_per_round'])
wait_time=data['wait_time']
server_address=server_ip+':'+server_port
# server_address=f"{server_ip}:{server_port}"
return server_address,model_name,problem_type,data_location,model_params,model_version,selected_feature,target_feature,train_size,num_records,wait_time,model_overwrite
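# Hedged example of the expected config.json (keys are those read above; all
# values are hypothetical and shown for a deeplearning client):
#
#   {
#     "server_IP": "127.0.0.1",
#     "server_port": "8080",
#     "model_name": "deeplearning",
#     "problem_type": "classification",
#     "data_location": "data/inputfile.csv",
#     "model_hyperparams": {"optimizer": "adam", "losses": "binary_crossentropy",
#                           "activation": "relu", "last_activation": "sigmoid",
#                           "batch_size": 32, "epochs": 10},
#     "train_size": 80,
#     "version": "1",
#     "selected_feature": "feature1,feature2,feature3",
#     "target_feature": "label",
#     "model_overwrite": "True",
#     "num_records_per_round": 100,
#     "wait_time": 0
#   }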
# def getfilepath()
""" Main aion federated learning client function call. """
if __name__ == "__main__":
##Client random id gen.
rand_id=randclientid(9, "ABC1234567890")
client_id='flclient-'+str(rand_id)
try:
json_file=sys.argv[1]
except Exception as e:
# sys.stdout.write("Please provide input configuration file. example: < python.exe 'fedclient\\aionflc.py' 'fedclient\\config.json' > ")
log(INFO, "Please provide input configuration file. example: <python.exe 'fedclient\\aionflc.py' 'fedclient\\config.json'> \\ |
n")
server_address,model_name,problem_type,data_location,model_params,model_version,selected_feature,target_feature,train_size,num_records,wait_time,model_overwrite = dataLoad(json_file)
file_name=model_name+'_'+model_version+".log"
cwd = os.path.abspath(os.path.dirname(__file__))
log_location = os.path.join(cwd, 'logs')
try:
os.makedirs(log_location)
except FileExistsError as fe:
# here,log_location already exists
pass
try:
logobj = logging.getLogger('AION')
fl_log=os.path.normpath(os.path.join(log_location,file_name))
log(INFO, "flclient log file path: %s ",str(fl_log))
logging.basicConfig(filename=fl_log,format='%(asctime)s %(message)s', filemode='w',level=logging.DEBUG)
except Exception as e:
log(INFO, "logging error. Error Msg: %s ",str(e))
pass
## default data location ~data\\inputfile.csv
data_location = os.path.normpath(os.path.join(cwd, data_location))
df = pd.read_csv(data_location)
df =df[~df.isin([np.nan, np.inf, -np.inf]).any(axis=1)]
df=df.reset_index(drop=True)
y=df[target_feature]
# X = df.drop(target_feature, axis=1)
#
# print("selected_feature: \\n",selected_feature)
X=df[selected_feature]
input_shape = X.shape[1] # len(selected_feature)
output_shape = len(y.value_counts())
test_size=(100-train_size)/100
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
no_classes = len(df.groupby(target_feature).count())
no_features=len(selected_feature)
## Pass the train data.
(X_train, y_train) = utils.partition(X_train, y_train, 1)[0]
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
# y_train = pd.get_dummies(y_train)
# y_test = pd.get_dummies(y_test)
y_train_dl = pd.get_dummies(y_train, sparse=True)
y_test_dl = pd.get_dummies(y_test, sparse=True)
if (problem_type.lower() == "classification"):
if (model_name.lower() == "logisticregression"):
#n_classes = df[target_feature].nunique()
no_classes = len(df.groupby(target_feature).count())
no_features=len(selected_feature)
logger.info("no_classes: "+str(no_classes))
logger.info("no_features: "+str(no_features))
modelName="logisticregression"
model = None
model = LogisticRegression(**model_params, warm_start=True)
try:
status=utils.setmodelName(model_name)
utils.set_initial_params(model,no_classes,no_features)
except Exception as e:
print("util error: \\n",e)
num_rounds=round(len(df)/num_records)
log(INFO, "Federated learning Client connecting to Server @: %s ",str(server_address))
try:
flower.client.start_numpy_client(server_address=server_address, client=aionflc(model,num_rounds,model_name,model_version,wait_time,client_id,num_records,model_overwrite,problem_type,X_train_scaled, X_test_scaled, y_train, y_test))
except Exception as e:
logger.info("AION FL Client instance error: \\n"+str(e))
log(INFO, "AION federated learning Client %s execution completed.",str(client_id))
elif (model_name.lower() == "deeplearning"):
optimizer = model_params["optimizer"]
loss_func = model_params["losses"]
act_func = model_params["activation"]
last_act_func = model_params["last_activation"]
input_shape = X.shape[1] # len(selected_feature)
output_shape = len(y.value_counts())
print(f"input_shape:{input_shape}, output_shape:{output_shape}.")
model = None
if output_shape == 2:
if last_act_func == "sigmoid" and loss_func == "binary_crossentropy":
model = dl_model.dl_binary_classification(input_shape, output_shape,
optimizer, loss_func,
act_func, last_act_func)
elif last_act_func == "softmax" and loss_func == "categorical_crossentropy":
model = dl_model.dl_binary_classification(input_shape, output_shape,
optimizer, loss_func,
act_func, last_act_func)
else:
model = dl_model.dl_multiClass_classification(input_shape,
output_shape, optimizer, loss_func,
act_func, last_act_func)
print(model.summary())
# status=utils.setmodelName(modelName)
# utils.set_initial_params(model,no_classes,no_features)
num_rounds=round(len(df)/num_records)
log(INFO, "Federated learning Client connecting to Server @: %s ",str(server_address))
try:
flower.client.start_numpy_client(server_address=server_address,
client=aionflc_dl(model,num_rounds,model_name,model_version,wait_time,client_id,num_records,model_overwrite,problem_type,X_train_scaled, X_test_scaled, y_train_dl, y_test_dl,model_params))
except Exception as e:
logger.info("AION FL Client instance error: \\n"+str(e))
log(INFO, "AION federated learning Client %s execution completed.",str(client_id))
logger.info("AION federated learning Client execution completed."+str(client_id))
elif(problem_type.lower() == "regression"):
if (model_name.lower() == "linearregression"):
# model=LinearRegression(**model_params,warm_start=True)
            # convert the string flags from the config into real booleans
            for bool_param in ('fit_intercept', 'copy_X', 'positive'):
                model_params[bool_param] = (model_params[bool_param] == 'True')
model=LinearRegression(**model_params)
status=utils.setmodelName(model_name)
utils.set_initial_params_reg(model,X_train.shape[0],X_train.shape[1])
num_rounds=round(len(df)/num_records)
log(INFO, "Federated learning Client connecting to Server @: %s ",str(server_address))
try:
flower.client.start_numpy_client(server_address=server_address, client=aionflc(model,num_rounds,model_name,model_version,wait_time,client_id,num_records,model_overwrite,problem_type,X_train_scaled, X_test_scaled, y_train, y_test))
except Exception as e:
logger.info("AION FL Client instance error: \\n"+str(e))
log(INFO, "AION federated learning Client %s execution completed.",str(client_id))
elif(model_name.lower() == "deeplearning"):
input_shape = X.shape[1] # len(selected_feature)
output_shape = len(y.value_counts())
optimizer = model_params["optimizer"]
loss_func = model_params["losses"]
act_func = model_params["activation"]
model = None
model = dl_model.dl_regression_model(input_shape, 1,
optimizer, loss_func, act_func)
num_rounds=round(len(df)/num_records)
log(INFO, "Federated learning Client connecting to Server @: %s ",str(server_address))
try:
flower.client.start_numpy_client(server_address=server_address, client=aionflc_dl(model,num_rounds,model_name,model_version,wait_time,client_id,num_records,model_overwrite,problem_type,X_train_scaled, X_test_scaled, y_train, y_test,model_params))
except Exception as e:
logger.info("AION FL Client instance error: \\n"+str(e))
log(INFO, "AION federated learning Client %s execution completed.",str(client_id))
<s> import tensorflow as tf
def dl_regression_model(input_shape, output_shape,
optimizer, loss_func, act_func):
inputs = tf.keras.Input(shape=(input_shape,))
x = tf.keras.layers.Dense(64,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(inputs)
x = tf.keras.layers.Dense(32,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(x)
x = tf.keras.layers.Dense(16,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(x)
x = tf.keras.layers.Dense(8,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(x)
outputs = tf.keras.layers.Dense(output_shape,
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(loss=loss_func ,
optimizer=optimizer,
metrics=["mean_absolute_error",
"mean_squared_error",
])
return model
def dl_multiClass_classification(input_shape, output_shape,
optimizer, loss_func, act_func, last_act_func):
inputs = tf.keras.Input(shape=(input_shape,))
x = tf.keras.layers.Dense(64,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(inputs)
x = tf.keras.layers.Dense(32,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(x)
x = tf.keras.layers.Dense(16,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(x)
x = tf.keras.layers.Dense(8,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(x)
outputs = tf.keras.layers.Dense(output_shape,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=last_act_func)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer, loss_func, metrics=["accuracy"])
return model
def dl_binary_classification(input_shape, output_shape,
optimizer, loss_func, act_func, last_act_func):
inputs = tf.keras.Input(shape=(input_shape,))
x = tf.keras.layers.Dense(64,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(inputs)
x = tf.keras.layers.Dense(32,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(x)
x = tf.keras.layers.Dense(16,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(x)
x = tf.keras.layers.Dense(8,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(x)
outputs = tf.keras.layers.Dense(output_shape,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=last_act_func)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer, loss_func,
metrics=["accuracy"])
return model
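# Hedged usage sketch (hyperparameter values are illustrative; they normally come
# from the model_params section of the client config):
#
#   reg_model = dl_regression_model(input_shape=10, output_shape=1,
#                                   optimizer='adam',
#                                   loss_func='mean_squared_error',
#                                   act_func='relu')
#   clf_model = dl_binary_classification(input_shape=10, output_shape=2,
#                                        optimizer='adam',
#                                        loss_func='binary_crossentropy',
#                                        act_func='relu', last_act_func='sigmoid')
#   # targets for the classifiers are expected one-hot encoded (see the
#   # pd.get_dummies calls in the federated client), e.g.:
#   # clf_model.fit(X_train, y_train_onehot, batch_size=32, epochs=10)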
<s><s> from typing import Tuple, Union, List
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from flwr.common.logger import log
from logging import INFO
XY = Tuple[np.ndarray, np.ndarray]
Dataset = Tuple[XY, XY]
LogRegParams = Union[XY, Tuple[np.ndarray]]
XYList = List[XY]
modelUsed=None
modelname=None
def setmodelName(modelselected):
try:
modelname=str(modelselected)
print("setmodelName ,given modelname: \\n",modelname)
if (modelname.lower() == 'logisticregression'):
modelUsed=LogisticRegression()
return True
elif (modelname.lower() == "linearregression"):
modelUsed = LinearRegression()
return True
elif (modelname.lower() == "sgdclassifier"):
#from sklearn.linear_model import SGDClassifier
modelUsed=SGDClassifier()
return True
elif (modelname.lower() == "knn"):
modelUsed = KNeighborsClassifier()
return True
        elif (modelname.lower() == "decisiontreeclassifier"):
modelUsed = DecisionTreeClassifier()
return True
else:
return False
except Exception as e:
log(INFO, "set fl model name fn issue: ",e)
def get_model_parameters(model:modelUsed) -> LogRegParams:
"""Returns the paramters of a sklearn LogisticRegression model."""
model_name=model.__class__.__name__
if model.fit_intercept:
params = (model.coef_, model.intercept_)
else:
params = (model.coef_,)
return params
def set_model_params(
model:modelUsed, params: LogRegParams
) -> modelUsed:
"""Sets the parameters of a sklean LogisticRegression model."""
model.coef_ = params[0]
model_name=model.__class__.__name__
try:
if model.fit_intercept:
model.intercept_ = params[1]
except Exception as e:
log(INFO, "set_model_params fn issue: ",e)
pass
return model
def set_initial_params_reg(model,no_vals,no_features):
"""Sets initial parameters as zeros Required since model params are
uninitialized until model.fit is called.
But server asks for initial parameters from clients at launch. Refer
to sklearn.linear_model.LogisticRegression documentation for more
information.
"""
no_vals = no_vals
n_features = no_features
# model.classes_ = np.array([i for i in range(n_classes)])
model.coef_ = np.zeros( n_features,)
model_name=model.__class__.__name__
try:
if model.fit_intercept:
# model.intercept_ = np.ones((no_vals,1))
model.intercept_ = np.zeros((no_vals,))
except Exception as e:
log(INFO, "set_initial_params fn issue: ",e)
pass
def set_initial_params(model,no_classes,no_features):
"""Sets initial parameters as zeros Required since model params are
uninitialized until model.fit is called.
But server asks for initial parameters from clients at launch. Refer
to sklearn.linear_model.LogisticRegression documentation for more
information.
"""
n_classes = no_classes
n_features = no_features
model.classes_ = np.array([i for i in range(n_classes)])
model.coef_ = np.zeros((n_classes, n_features))
model_name=model.__class__.__name__
try:
if model.fit_intercept:
model.intercept_ = np.zeros((n_classes,))
except Exception as e:
log(INFO, "set_initial_params fn issue: ",e)
pass
def shuffle(X: np.ndarray, y: np.ndarray) -> XY:
"""Shuffle X and y."""
rng = np.random.default_rng()
idx = rng.permutation(len(X))
return X[idx], y[idx]
def partition(X: np.ndarray, y: np.ndarray, num_partitions: int) -> XYList:
"""Split X and y into a number of partitions."""
return list(
zip(np.array_split(X, num_partitions), np.array_split(y, num_partitions))
)
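# Hedged usage sketch (shapes and values are illustrative): the federated client
# zero-initialises an unfitted sklearn model so the server can request parameters
# before the first fit, then exchanges parameters and partitions the data:
#
#   model = LogisticRegression(warm_start=True)
#   setmodelName('logisticregression')
#   set_initial_params(model, no_classes=2, no_features=10)
#   params = get_model_parameters(model)     # (coef_, intercept_) when fit_intercept
#   model = set_model_params(model, params)
#   (X_part, y_part) = partition(X_train, y_train, num_partitions=5)[0]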
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s>
import sys
import os
import pickle
import json
import traceback
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import scipy
from pandas import json_normalize
from word2number import w2n
from river import stream
class incBatchPredictor():
def __init__(self):
self.home = os.path.dirname(os.path.abspath(__file__))
self.configPath = os.path.join(self.home, 'production', 'Config.json')
self.configDict = {}
self.incFillPath = os.path.join(self.home,'production','profiler','incFill.pkl')
self.incLabelMappingPath = os.path.join(self.home,'production', 'profiler' , 'incLabelMapping.pkl')
self.incCatEncoderPath = os.path.join(self.home, 'production' , 'profiler', 'incCatEncoder.pkl')
self.incScalerPath = os.path.join(self.home, 'production', 'profiler','incScaler.pkl')
self.incFill = None
self.incLabelMapping = None
self.incCatEncoder = None
self.incScaler = None
self.model = None
self.targetCol = None
self.modelName = ''
self.problemType = ''
self.numFtrs = []
self.catFtrs = []
def readData(self, data):
try:
if os.path.splitext(data)[1] == ".tsv":
df=pd.read_csv(data,encoding='utf-8',sep='\\t')
elif os.path.splitext(data)[1] == ".csv":
df=pd.read_csv(data,encoding='utf-8')
elif os.path.splitext(data)[1] == ".dat":
df=pd.read_csv(data,encoding='utf-8')
else:
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
df = json_normalize(jsonData)
df.rename(columns=lambda x:x.strip(), inplace=True)
return df
except KeyError as e:
output = {"status":"FAIL","message":str(e).strip('"')}
print(json.dumps(output))
except Exception as e:
output = {"status":"FAIL","message":str(e).strip('"')}
print(json.dumps(output))
def readConfig(self):
with open(self.configPath, 'r', encoding= 'utf8') as f:
self.configDict = json.load(f)
self.targetCol = self.configDict['targetCol']
if 'numFtrs' in self.configDict:
self.numFtrs = self.configDict['numFtrs']
if 'catFtrs' in self.configDict:
self.catFtrs = self.configDict['catFtrs']
if 'allNumCols' in self.configDict:
self.allNumCols = self.configDict['allNumCols']
if 'allCatCols' in self.configDict:
self.allCatCols = self.configDict['allCatCols']
if 'wordToNumCols' in self.configDict:
self.wordToNumericCols = self.configDict['wordToNumCols']
self.emptyFtrs = self.configDict['emptyFtrs']
self.allFtrs = self.configDict['allFtrs']
self.modelName = self.configDict['modelName']
self.problemType = self.configDict['problemType']
self.modelPath = os.path.join(self.home, 'production', 'model', self.modelName+'.pkl')
self.scoreParam = self.configDict['scoreParam']
self.score = self.configDict['score']
def pickleLoad(self, file):
if os.path.exists(file):
with open(file, 'rb') as f:
model = pickle.load(f)
return model
else:
return None
def s2n(self,value):
try:
x=eval(value)
return x
except:
try:
return w2n.word_to_num(value)
except:
return np.nan
def convertWordToNumeric(self,dataframe,feature):
try:
dataframe[feature]=dataframe[feature].apply(lambda x: self.s2n(x))
return dataframe
except Exception as inst:
self.log.info("convertWordToNumeric Failed ===>"+str(inst))
return dataframe
def loadSavedModels(self):
self.incFill = self.pickleLoad(self.incFillPath)
self.incLabelMapping = self.pickleLoad(self.incLabelMappingPath)
self.incCatEncoder = self.pickleLoad(self.incCatEncoderPath)
self.incScaler = self.pickleLoad(self.incScalerPath)
self.model = self.pickleLoad(self.modelPath)
def apply_river_model(self, x, profModel):
print(profModel.imputers)
return pd.Series(profModel.transform_one(x))
def apply_enc(self, x):
return pd.Series(self.incCatEncoder.transform_one(x))
def dataFramePreProcess(self, df):
df = df.replace(r'^\\s*$', np.NaN, regex=True)
df = df.replace('-', np.nan)
df = df.replace('?', np.nan)
columns = list(df.columns)
if self.wordToNumericCols:
for ftr in self.wordToNumericCols:
if ftr in columns:
tempDataFrame=df.copy(deep=True)
testDf = self.convertWordToNumeric(tempDataFrame,ftr)
try:
df[ftr]=testDf[ftr].astype(float)
except:
pass
columns = list(df.columns)
for empCol in self.emptyFtrs:
if empCol in columns:
df = df.drop(columns=[empCol])
return df
def profiler(self, df):
df = df[self.allFtrs]
df = self.dataFramePreProcess(df)
if 'num_fill' in self.configDict:
if self.configDict['num_fill'] == 'drop':
df = df.dropna(axis = 0, subset=self.allNumCols)
elif self.configDict['num_fill'] == 'zero':
df[self.numFtrs] = df[self.numFtrs].fillna(value = 0.0)
else:
for x in self.numFtrs:
if x == self.targetCol:
continue
df[x] = df[x].fillna(value = self.configDict['num_fill'][x])
if 'cat_fill' in self.configDict:
if self.configDict['cat_fill'] == 'drop':
df = df.dropna(axis = 0, subset=self.allCatCols)
elif self.configDict['cat_fill'] == 'zero':
df[self.catFtrs] = df[self.catFtrs].fillna(value = 0.0)
else:
for x in self.catFtrs:
if x == self.targetCol:
continue
df[x] = df[x].fillna(value = self.configDict['cat_fill'][x])
if self.incCatEncoder:
transformed_data = df[self.catFtrs].apply(lambda row: self.apply_enc(row.to_dict()), axis='columns')
df[self.catFtrs] = transformed_data
if self.incScaler:
df[self.numFtrs] = self.incScaler.transform(df[self.numFtrs])
return df
def trainedModel(self,X):
testStream = stream.iter_pandas(X)
preds = []
if self.problemType.lower() == 'regression':
for xi,yi in testStream:
try:
pred = self.model.predict_proba_one(xi)
preds.append(pred)
except:
pred = self.model.predict_one(xi)
preds.append(pred)
preds = pd.DataFrame(preds)
return preds
elif self.problemType.lower() == 'classification':
for xi,yi in testStream:
try:
pred = self.model.predict_proba_one(xi)
preds.append(pred)
except:
continue
out = pd.DataFrame(preds)
return out
def apply_output_format(self,df,modeloutput):
if self.problemType.lower() == 'regression':
df['prediction'] = modeloutput[0]
df['prediction'] = df['prediction'].round(2)
elif self.problemType.lower() == 'classification':
modeloutput = round(modeloutput,2)
if modeloutput.shape[1] == 1:
df['prediction'] = modeloutput
df['prediction'] = df['prediction'].astype(int)
else:
try:
predCol = modeloutput.idxmax(axis=1)
df['prediction'] = predCol.astype(int)
df['prediction'] = self.incLabelMapping.inverse_transform(df['prediction'])
except:
df['prediction'] = modeloutput.idxmax(axis=1)
df['probability'] = modeloutput.max(axis=1).round(2)
modeloutput.columns = modeloutput.columns.astype(int)
modeloutput.columns = self.incLabelMapping.inverse_transform(list(modeloutput.columns))
df['remarks'] = modeloutput.apply(lambda x: x.to_json(), axis=1)
outputjson = df.to_json(orient='records')
outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}
return(json.dumps(outputjson))
def predict(self,data):
try:
df = self.readData(data)
dfOrg = df.copy()
self.readConfig()
if len(self.configDict)!=0:
self.loadSavedModels()
df = self.profiler(df)
modeloutput = self.trainedModel(df)
dfOrg = dfOrg[self.allFtrs]
output = self.apply_output_format(dfOrg, modeloutput)
            else:
                # production Config.json is missing or empty; report failure instead of passing silently
                output = json.dumps({"status":"FAIL","message":"Production Config.json is missing or empty"})
except Exception as e:
print(traceback.format_exc())
output = {"status":"FAIL","message":str(e).strip('"')}
return output
if __name__ == "__main__":
incBPobj = incBatchPredictor()
output = incBPobj.predict(sys.argv[1])
print("predictions:",output)
<s>
import sys
import os
import pickle
import json
import timeit
import warnings
import traceback
import logging
from pathlib import Path
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas import json_normalize
import shutil
from word2number import w2n
from pytz import timezone
import datetime
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, accuracy_score, r2_score,mean_absolute_error, mean_squared_error, recall_score, precision_score, f1_score
from river import stream
class incBatchLearner():
def __init__(self):
self.home = os.path.dirname(os.path.abspath(__file__))
self.configPath = os.path.join(self.home, 'production', 'Config.json')
self.configDict = {}
self.updConfigDict = None
self.incFillPath = os.path.join(self.home,'production','profiler','incFill.pkl')
self.incOutlierRemPath = os.path.join(self.home, 'production', 'profiler', 'incOutlierRem.pkl')
self.incLabelMappingPath = os.path.join(self.home,'production', 'profiler' , 'incLabelMapping.pkl')
self.incCatEncoderPath = os.path.join(self.home, 'production' , 'profiler', 'incCatEncoder.pkl')
self.incScalerPath = os.path.join(self.home, 'production', 'profiler','incScaler.pkl')
self.testPath = os.path.join(self.home, 'data', 'test.csv')
self.modelName = ''
self.incFill = None
self.incLabelMapping = None
self.incCatEncoder = None
self.incScaler = None
self.incOutlierRem = None
self.model = None
self.targetCol = None
self.numFtrs = []
self.catFtrs = []
self.allFtrs = []
self.logFileName=os.path.join(self.home,'log','model_training_logs.log')
filehandler = logging.FileHandler(self.logFileName, 'a','utf-8')
formatter = logging.Formatter('%(message)s')
filehandler.setFormatter(formatter)
self.log = logging.getLogger('eion')
self.log.propagate = False
self.log.addHandler(filehandler)
self.log.setLevel(logging.INFO)
def readData(self, data, isTest = False):
if not isTest:
self.log.info('New Data Path: '+str(data))
else:
self.log.info('Test Data Path: '+str(data))
startTime = timeit.default_timer()
if os.path.splitext(data)[1] == ".tsv":
df=pd.read_csv(data,encoding='utf-8',sep='\\t')
elif os.path.splitext(data)[1] == ".csv":
df=pd.read_csv(data,encoding='utf-8')
elif os.path.splitext(data)[1] == ".dat":
df=pd.read_csv(data,encoding='utf-8')
else:
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
df = json_normalize(jsonData)
dataLoadTime = timeit.default_timer() - startTime
self.log.info('\\nData Load time(sec) :'+str(dataLoadTime))
self.log.info('\\n First ten rows of new data')
self.log.info(df.head(10))
self.log.info('Data Frame shape: '+str(df.shape))
df.rename(columns=lambda x:x.strip(), inplace=True)
return df
def readConfig(self):
with open(self.configPath, 'r', encoding= 'utf8') as f:
self.configDict = json.load(f)
self.configDict['partialFit']+=1
self.log.info('************* Partial Fit '+str(self.configDict['partialFit'])+' *************** \\n')
msg = '-------> Execution Start Time: '+ datetime.datetime.now(timezone("Asia/Kolkata")).strftime('%Y-%m-%d %H:%M:%S' + ' IST')
self.log.info(msg)
self.targetCol = self.configDict['targetCol']
if 'numFtrs' in self.configDict:
self.numFtrs = self.configDict['numFtrs']
if 'catFtrs' in self.configDict:
self.catFtrs = self.configDict['catFtrs']
if 'allNumCols' in self.configDict:
self.allNumCols = self.configDict['allNumCols']
if 'allCatCols' in self.configDict:
self.allCatCols = self.configDict['allCatCols']
if 'encCols' in self.configDict:
self.encCols = self.configDict['encCols']
if 'wordToNumCols' in self.configDict:
self.wordToNumericCols = self.configDict['wordToNumCols']
self.emptyFtrs = self.configDict['emptyFtrs']
if 'encTarget' in self.configDict:
self.encTarget = self.configDict['encTarget']
if 'noOfClasses' in self.configDict:
self.allClasses = list(range(int(self.configDict['noOfClasses'])))
self.misval_ratio = self.configDict['misval_ratio']
self.allFtrs = self.configDict['allFtrs']
self.modelName = self.configDict['modelName']
self.problemType = self.configDict['problemType']
self.modelPath = os.path.join(self.home, 'production', 'model', self.modelName+'.pkl')
self.scoreParam = self.configDict['scoreParam']
self.score = self.configDict['score']
def pickleLoad(self, file, filename):
if os.path.exists(file):
with open(file, 'rb') as f:
model = pickle.load(f)
file_size = os.path.getsize(file)
self.log.info(str(filename)+" size is :"+str(file_size)+"bytes")
return model
else:
return None
def s2n(self,value):
try:
x=eval(value)
return x
except:
try:
return w2n.word_to_num(value)
except:
return np.nan
def convertWordToNumeric(self,dataframe,feature):
try:
dataframe[feature]=dataframe[feature].apply(lambda x: self.s2n(x))
return dataframe
except Exception as inst:
self.log.info("convertWordToNumeric Failed ===>"+str(inst))
return dataframe
def pickleDump(self, model, path):
if model is not None:
with open(path, 'wb') as f:
pickle.dump(model, f)
def splitTrainTest(self,X,y):
if self.problemType.lower() == 'regression':
xtrain,xtest,ytrain,ytest=train_test_split(X,y,test_size=0.2,shuffle=True)
else:
try:
xtrain,xtest,ytrain,ytest=train_test_split(X,y,stratify=y,test_size=0.2,shuffle=True)
except:
xtrain,xtest,ytrain,ytest=train_test_split(X,y,test_size=0.2,shuffle=True)
return xtrain,xtest,ytrain,ytest
def loadSavedModels(self):
self.incFill = self.pickleLoad(self.incFillPath, 'Online Missing Value Filler')
self.incLabelMapping = self.pickleLoad(self.incLabelMappingPath, 'Online Label Encoder')
self.incCatEncoder = self.pickleLoad(self.incCatEncoderPath, 'Online Categorical Encoder')
self.incScaler = self.pickleLoad(self.incScalerPath, 'Online Scaler')
self.incOutlierRem = self.pickleLoad(self.incOutlierRemPath, 'Online Outlier Detector')
self.model = self.pickleLoad(self.modelPath, str(os.path.basename(self.modelPath))[:-4])
self.log.info('\\nData Profiler and ML models loaded in Memory')
def saveModels(self):
os.makedirs(os.path.join(self.home, 'production', 'profiler'))
os.makedirs(os.path.join(self.home, 'production', 'model'))
if type(self.configDict['num_fill']) == type({}) or type(self.configDict['cat_fill']) == type({}):
self.pickleDump(self.incFill, self.incFillPath)
self.pickleDump(self.incLabelMapping, self.incLabelMappingPath)
self.pickleDump(self.incCatEncoder, self.incCatEncoderPath)
self.pickleDump(self.incScaler, self.incScalerPath)
self.pickleDump(self.incOutlierRem, self.incOutlierRemPath)
self.pickleDump(self.model, self.modelPath)
self.log.info('Models saved into production')
def saveConfig(self):
with open(self.configPath, 'w', encoding= 'utf8') as f:
json.dump(self.updConfigDict, f, ensure_ascii=False)
def apply_river_model(self, x, profModel, isTest):
if not isTest:
profModel.learn_one(x)
return pd.Series(profModel.transform_one(x))
def apply_enc(self, x, isTest):
if not isTest:
y = x[self.encTarget]
self.incCatEncoder.learn_one(x, y)
return pd.Series(self.incCatEncoder.transform_one(x))
def apply_od_pipe(self, x):
score = self.incOutlierRem.score_one(x)
is_anomaly = self.incOutlierRem.classify(score)
self.incOutlierRem.learn_one(x)
return is_anomaly
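    # apply_od_pipe() is used row-wise in profiler() below: score_one() returns the
    # incremental anomaly score for a single record, classify() turns that score
    # into a boolean anomaly flag, and learn_one() updates the online outlier model
    # so later records are judged against the data seen so far. Rows flagged as
    # anomalies are dropped from the training frame.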
def dataFramePreProcess(self, df):
df = df.replace('-', np.NaN)
df = df.replace('?', np.NaN)
df = df.replace(r'^\\s*$', np.NaN, regex=True)
columns = list(df.columns)
if self.wordToNumericCols:
for ftr in self.wordToNumericCols:
if ftr in columns:
self.log.info('Converting '+ftr+' to numeric type...')
tempDataFrame=df.copy(deep=True)
testDf = self.convertWordToNumeric(tempDataFrame,ftr)
try:
df[ftr]=testDf[ftr].astype(float)
except:
pass
columns = list(df.columns)
for empCol in self.emptyFtrs:
if empCol in columns:
df = df.drop(columns=[empCol])
columns = list(df.columns)
self.log.info( 'Detecting Missing Values')
nonNAArray=[]
numOfRows = df.shape[0]
for i in columns:
numNa=df.loc[(pd.isna(df[i])),i ].shape[0]
nonNAArray.append(tuple([i,numNa]))
self.missingCols = []
self.emptyCols = []
for item in nonNAArray:
numofMissingVals = item[1]
if(numofMissingVals !=0):
self.log.info('-------> Feature '+str(item[0]))
self.log.info('----------> Number of Empty Rows '+str(numofMissingVals))
self.missingCols.append(item[0])
if(numofMissingVals >= numOfRows * self.misval_ratio):
self.log.info('----------> Empty: Yes')
self.log.info('----------> Permitted Rows: '+str(int(numOfRows * self.misval_ratio)))
self.emptyCols.append(item[0])
if(len(self.missingCols) !=0):
self.log.info( '----------- Detecting for Missing Values End -----------\\n')
else:
self.log.info( '-------> Missing Value Features :Not Any')
self.log.info( '----------- Detecting for Missing Values End -----------\\n')
return df
def profiler(self, df, isTest=False):
if not isTest:
            self.log.info('Starting profiling of New Training Data')
else:
self.log.info('Starting profiling of Testing Data')
startTime = timeit.default_timer()
df = self.dataFramePreProcess(df)
if 'num_fill' in self.configDict:
if self.configDict['num_fill'] == 'drop':
df = df.dropna(axis = 0, subset=self.allNumCols)
elif self.configDict['num_fill'] == 'zero':
df[self.allNumCols] = df[self.allNumCols].fillna(value = 0.0)
else:
df[self.allNumCols]= df[self.allNumCols].apply(pd.to_numeric)
df = df.astype(object).where(df.notna(), None) #river expects nan values to be None
df[self.allNumCols]= df[self.allNumCols].apply(lambda row: self.apply_river_model(row.to_dict(), self.incFill['num_fill'], isTest), axis='columns')
if not isTest:
self.updConfigDict['num_fill'] = {col:self.incFill['num_fill'].stats[col].get() for col in self.allNumCols}
if 'cat_fill' in self.configDict:
if self.configDict['cat_fill'] == 'drop':
df = df.dropna(axis = 0, subset=self.allCatCols)
elif self.configDict['cat_fill'] == 'zero':
df[self.allCatCols] = df[self.allCatCols].fillna(value = 0.0)
else:
df = df.astype(object).where(df.notna(), None)
df[self.allCatCols]= df[self.allCatCols].apply(lambda row: self.apply_river_model(row.to_dict(),self.incFill['cat_fill'], isTest), axis='columns')
if not isTest:
self.updConfigDict['cat_fill'] = {col:self.incFill['cat_fill'].stats[col].get() for col in self.allCatCols}
if not isTest:
self.log.info('Missing value profiler model updated')
if self.incLabelMapping:
uq_classes = df[self.targetCol].unique()
le_classes = list(self.incLabelMapping.classes_)
uq_classes = [type(le_classes[0])(x) for x in uq_classes]
unseen_classes = set(uq_classes) - set(le_classes)
self.log.info('Existing classes: '+str(le_classes))
if len(unseen_classes)>0:
self.log.info('New unseen classes: '+str(unseen_classes))
le_classes.extend(unseen_classes)
from sklearn.preprocessing import LabelEncoder
self.incLabelMapping = LabelEncoder()
self.incLabelMapping.fit(le_classes)
self.log.info(self.incLabelMapping.classes_)
self.log.info('Label encoder refitted with new unseen classes')
df[self.targetCol] = df[self.targetCol].apply(str)
df[self.targetCol] = self.incLabelMapping.transform(df[self.targetCol])
if not isTest:
self.log.info('Target column label encoding is done')
if self.incCatEncoder:
if self.problemType.lower() == 'regression':
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
self.encTarget = 'scaledTarget'
df['scaledTarget'] = sc.fit_transform(df[self.targetCol].to_numpy().reshape(-1,1))
transformed_data = df[self.encCols].apply(lambda row: self.apply_enc(row.to_dict(), isTest), axis='columns')
if self.targetCol in transformed_data.columns:
transformed_data.drop(self.targetCol, inplace=True, axis = 1)
df[self.catFtrs] = transformed_data
if not isTest:
self.updConfigDict['catEnc'] = []
if len(self.catFtrs) == 1:
col = self.catFtrs[0]
self.configDict['catEnc'].append({col:self.incCatEncoder['TargetAgg'].state.to_dict()})
else:
for i, col in enumerate(self.catFtrs):
if i==0:
no = ''
else:
no = str(i)
self.configDict['catEnc'].append({col:self.incCatEncoder['TransformerUnion']['TargetAgg'+no].state.to_dict()})
self.log.info('Categorical encoding is done and profiler model updated')
if self.incScaler:
if not isTest:
self.incScaler = self.incScaler.partial_fit(df[self.numFtrs])
self.log.info('Numerical features scaled and profiler model updated')
df[self.numFtrs] = self.incScaler.transform(df[self.numFtrs])
if self.incOutlierRem and not isTest:
df = df[df[self.numFtrs].apply(lambda x: False if self.apply_od_pipe(x.to_dict()) else True, axis=1)]
df.reset_index(drop=True, inplace=True)
self.log.info('Outliers removed and profiler model updated')
if not isTest:
self.log.info('Check config file in production folder for updated profiler values')
profilerTime = timeit.default_timer() - startTime
self.log.info('\\nProfiling time(sec) :'+str(profilerTime))
return df
def riverTrain(self, X, Y):
trainStream = stream.iter_pandas(X, Y)
for i, (xi, yi) in enumerate(trainStream):
if yi!=None:
self.model.learn_one(xi, yi)
def riverEvaluate(self, xtest):
testStream = stream.iter_pandas(xtest)
preds = []
for xi,yi in testStream:
pred = self.model.predict_one(xi)
preds.append(pred)
return preds
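    # Hedged sketch: river models learn one record at a time, so once
    # loadSavedModels() has populated self.model an incremental round reduces to
    #   self.riverTrain(X, Y)           # self.model.learn_one(xi, yi) per row
    #   preds = self.riverEvaluate(X)   # self.model.predict_one(xi) per row
    # updateLearning() below additionally profiles the data, archives the previous
    # production artefacts and records the new score before saving.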
def trainModel(self,df):
startTime = timeit.default_timer()
X = df[self.allFtrs]
Y = df[self.targetCol]
try:
self.riverTrain(X,Y)
trainTime = timeit.default_timer() - startTime
self.log.info('\\nModel Training time(sec) :'+str(trainTime))
self.log.info(self.modelName+' model updated')
self.log.info('First fit model params are '+str(self.configDict['modelParams']))
except Exception as e:
raise e
def archiveModels(self):
source = os.path.join(self.home, 'production')
archivePath = os.path.join(self.home,'archives')
if os.path.isdir(archivePath):
NoOfArchives = sum(os.path.isdir(os.path.join(self.home,'archives',str(i))) for i in os.listdir(archivePath))
destination = os.path.join(self.home,'archives',str(NoOfArchives+1))
else:
destination = os.path.join(archivePath,'1')
if not os.path.exists(destination):
os.makedirs(destination)
allfiles = os.listdir(source)
for f in allfiles:
src_path = os.path.join(source, f)
dst_path = os.path.join(destination, f)
shutil.move(src_path, dst_path)
self.log.info('Previous production models archived')
def get_score(self,metric,actual,predict):
if 'accuracy' in str(metric).lower():
score = accuracy_score(actual,predict)
score = score*100
elif 'recall' in str(metric).lower():
score = recall_score(actual,predict,average='macro')
score = score*100
elif 'precision' in str(metric).lower():
score = precision_score(actual,predict,average='macro')
score = score*100
elif 'f1_score' in str(metric).lower():
score = f1_score(actual,predict, average='macro')
score = score*100
elif 'roc_auc' in str(metric).lower():
try:
score = roc_auc_score(actual,predict,average="macro")
except:
try:
actual = pd.get_dummies(actual)
predict = pd.get_dummies(predict)
score = roc_auc_score(actual,predict, average='weighted', multi_class='ovr')
except:
score = 0
score = score*100
elif ('mse' in str(metric).lower()) or ('neg_mean_squared_error' in str(metric).lower()):
score = mean_squared_error(actual,predict)
elif ('rmse' in str(metric).lower()) or ('neg_root_mean_squared_error' in str(metric).lower()):
score=mean_squared_error(actual,predict,squared=False)
elif ('mae' in str(metric).lower()) or ('neg_mean_absolute_error' in str(metric).lower()):
score=mean_absolute_error(actual,predict)
elif 'r2' in str(metric).lower():
score=r2_score(actual,predict)*100
return round(score,2)
def checkColumns(self, df):
self.log.info('Checking DataColumns in new data')
dfCols = list(df.columns)
allCols = self.allFtrs.copy()
allCols.append(self.targetCol)
missingCols = []
for col in allCols:
if col not in dfCols:
missingCols.append(col)
if len(missingCols)>0:
raise Exception('DataFrame is missing columns: '+str(missingCols))
else:
self.log.info('All required columns are present: '+str(list(dfCols)[:500]))
def plotMetric(self):
y = self.configDict['metricList']
fedrows = self.configDict['trainRowsList']
fig = plt.figure()
ax = fig.gca()
if self.configDict['problemType'] == 'classification':
ax.set_yticks(np.arange(0, 110, 10))
plt.ylim(ymin=0)
if self.configDict['problemType'] == 'regression':
minMet = min(y)
maxMet = max(y)
plt.ylim(minMet - 10, maxMet+10)
plt.plot(y)
plt.ylabel(self.scoreParam)
plt.xlabel('Partial Fits')
plt.title(str(self.scoreParam)+' over training rows')
if type(fedrows[0])!=type(''):
fedrows = [str(x) for x in fedrows]
x = list(range(len(fedrows)))
for i in range(len(fedrows)):
plt.annotate(fedrows[i], (x[i], y[i] + 5))
if self.configDict['problemType'] == 'classification':
plt.annotate(round(y[i],1), (x[i], y[i] - 3))
plt.grid()
plt.savefig(os.path.join(self.home, 'production','metric'))
return
def updateLearning(self,data):
try:
self.readConfig()
self.updConfigDict = self.configDict.copy()
df = self.readData(data)
self.checkColumns(df)
self.loadSavedModels()
X = df[self.allFtrs]
y = df[self.targetCol]
xtrain,xtest,ytrain,ytest = self.splitTrainTest(X,y)
dftrain = pd.concat((xtrain, ytrain), axis = 1)
dftest = pd.concat((xtest, ytest), axis = 1)
dftrain = self.profiler(dftrain)
dftest = self.profiler(dftest, isTest = True)
xtest = dftest[self.allFtrs]
ytest = dftest[self.targetCol]
self.trainModel(dftrain)
preds = self.riverEvaluate(xtest)
score = self.get_score(self.scoreParam, ytest, preds)
self.updConfigDict['score'] = score
self.log.info('Previous '+self.scoreParam+': '+str(self.configDict['score']))
self.log.info('Current '+self.scoreParam+': '+str(self.updConfigDict['score']))
self.configDict['trainRowsList'].append(self.configDict['trainRowsList'][-1]+xtrain.shape[0])
self.log.info('Number of data points trained on so far: '+str(self.configDict['trainRowsList'][-1]))
self.configDict['metricList'].append(self.updConfigDict['score'])
self.archiveModels()
self.plotMetric()
self.saveModels()
self.saveConfig()
msg = self.scoreParam+': Previous:'+str(self.configDict['score'])+' Current:'+ str(self.updConfigDict['score'])
output = {"status":"SUCCESS","Msg":msg}
self.log.info(str(output))
except Exception as e:
print(traceback.format_exc())
            self.log.info('Partial Fit Failed '+str(traceback.format_exc()))
if self.updConfigDict != None:
self.saveConfig()
output = {"status":"FAIL","Msg":str(e).strip('"')}
return json.dumps(output)
if __name__ == "__main__":
incBLObj = incBatchLearner()
output = incBLObj.updateLearning(sys.argv[1])
print("aion_learner_status:",output)
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import io
import json
import logging
import pandas as pd
import sys
import numpy as np
from pathlib import Path
from word2number import w2n
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.impute import SimpleImputer, KNNImputer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import FunctionTransformer
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.preprocessing import PowerTransformer
from sklearn.compose import ColumnTransformer
from sklearn.base import TransformerMixin
from sklearn.ensemble import IsolationForest
from category_encoders import TargetEncoder
import scipy
try:
import transformations.data_profiler_functions as cs
except:
import data_profiler_functions as cs
if 'AION' in sys.modules:
try:
from appbe.app_config import DEBUG_ENABLED
except:
DEBUG_ENABLED = False
else:
DEBUG_ENABLED = False
log_suffix = f'[{Path(__file__).stem}] '
class profiler():
def __init__(self, xtrain, ytrain=None, target=None, encode_target = False, config={}, keep_unprocessed=[],data_path=None,log=None):
if not isinstance(xtrain, pd.DataFrame):
            raise ValueError(f'{log_suffix}supported data type is pandas.DataFrame but provided data is of {type(xtrain)} type')
if xtrain.empty:
raise ValueError(f'{log_suffix}Data frame is empty')
if target and target in xtrain.columns:
self.target = xtrain[target]
xtrain.drop(target, axis=1, inplace=True)
self.target_name = target
elif ytrain:
self.target = ytrain
self.target_name = 'target'
else:
self.target = pd.Series()
self.target_name = None
self.data_path = data_path
self.encode_target = encode_target
self.label_encoder = None
self.data = xtrain
self.keep_unprocessed = keep_unprocessed
self.colm_type = {}
for colm, infer_type in zip(self.data.columns, self.data.dtypes):
self.colm_type[colm] = infer_type
self.numeric_feature = []
self.cat_feature = []
self.text_feature = []
self.wordToNumericFeatures = []
self.added_features = []
self.pipeline = []
self.dropped_features = {}
self.train_features_type={}
self.__update_type()
self.config = config
self.featureDict = config.get('featureDict', [])
self.output_columns = []
self.feature_expender = []
self.text_to_num = {}
self.force_numeric_conv = []
if log:
self.log = log
else:
self.log = logging.getLogger('eion')
self.type_conversion = {}
def log_dataframe(self, msg=None):
buffer = io.StringIO()
self.data.info(buf=buffer)
if msg:
log_text = f'Data frame after {msg}:'
else:
log_text = 'Data frame:'
log_text += '\\n\\t'+str(self.data.head(2)).replace('\\n','\\n\\t')
log_text += ('\\n\\t' + buffer.getvalue().replace('\\n','\\n\\t'))
self.log.info(log_text)
def transform(self):
if self.is_target_available():
if self.target_name:
self.log.info(f"Target feature name: '{self.target_name}'")
self.log.info(f"Target feature size: {len(self.target)}")
else:
self.log.info(f"Target feature not present")
self.log_dataframe()
print(self.data.info())
try:
self.process()
except Exception as e:
self.log.error(e, exc_info=True)
raise
pipe = FeatureUnion(self.pipeline)
try:
if self.text_feature:
from text.textProfiler import set_pretrained_model
set_pretrained_model(pipe)
conversion_method = self.get_conversion_method()
process_data = pipe.fit_transform(self.data, y=self.target)
# save for testing
if DEBUG_ENABLED:
if isinstance(process_data, scipy.sparse.spmatrix):
process_data = process_data.toarray()
df = pd.DataFrame(process_data)
df.to_csv('debug_preprocessed.csv', index=False)
if self.text_feature and conversion_method == 'latentsemanticanalysis':
n_size = self.get_tf_idf_output_size( pipe)
dimensions = self.get_tf_idf_dimensions()
                if n_size != dimensions:
                    dimensions = n_size
from sklearn.decomposition import TruncatedSVD
reducer = TruncatedSVD( n_components = dimensions)
reduced_data = reducer.fit_transform( process_data[:,-n_size:])
text_process_idx = [t[0] for t in pipe.transformer_list].index('text_process')
pipe.transformer_list[text_process_idx][1].steps.append(('feature_reducer',reducer))
if isinstance(process_data, scipy.sparse.spmatrix):
process_data = process_data.toarray()
process_data = np.concatenate((process_data[:,:-n_size], reduced_data), axis=1)
last_step = self.feature_expender.pop()
self.feature_expender.append({'feature_reducer':list(last_step.values())[0]})
except EOFError as e:
if "Compressed file ended before the end-of-stream marker was reached" in str(e):
raise EOFError('Pretrained model is not downloaded properly')
self.update_output_features_names(pipe)
if isinstance(process_data, scipy.sparse.spmatrix):
process_data = process_data.toarray()
df = pd.DataFrame(process_data, index=self.data.index, columns=self.output_columns)
if self.is_target_available() and self.target_name:
df[self.target_name] = self.target
if self.keep_unprocessed:
df[self.keep_unprocessed] = self.data[self.keep_unprocessed]
self.log_numerical_fill()
self.log_categorical_fill()
self.log_normalization()
return df, pipe, self.label_encoder
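    # Hedged usage sketch (column names and config values are illustrative): the
    # profiler is built on the raw training frame and transform() returns the
    # processed frame together with the fitted FeatureUnion pipeline and the
    # target label encoder:
    #
    #   prof = profiler(train_df, target='label', encode_target=True, config={})
    #   processed_df, pipe, label_enc = prof.transform()
    #   processed_new = pipe.transform(new_df)   # reuse the fitted pipeline later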
def log_type_conversion(self):
if self.log:
self.log.info('----------- Inspecting Features -----------')
self.log.info('----------- Type Conversion -----------')
count = 0
for k, v in self.type_conversion.items():
if v[0] != v[1]:
self.log.info(f'-------> {k} -> from {v[0]} to {v[1]} : {v[2]}')
self.log.info('Status:- |... Feature inspection done')
def check_config(self):
removeDuplicate = self.config.get('removeDuplicate', False)
self.config['removeDuplicate'] = cs.get_boolean(removeDuplicate)
self.config['misValueRatio'] = float(self.config.get('misValueRatio', cs.default_config['misValueRatio']))
self.config['numericFeatureRatio'] = float(self.config.get('numericFeatureRatio', cs.default_config['numericFeatureRatio']))
self.config['categoryMaxLabel'] = int(self.config.get('categoryMaxLabel', cs.default_config['categoryMaxLabel']))
featureDict = self.config.get('featureDict', [])
if isinstance(featureDict, dict):
self.config['featureDict'] = []
if isinstance(featureDict, str):
self.config['featureDict'] = []
def process(self):
#remove duplicate not required at the time of prediction
self.check_config()
self.remove_constant_feature()
self.remove_empty_feature(self.config['misValueRatio'])
self.remove_index_features()
self.dropna()
if self.config['removeDuplicate']:
self.drop_duplicate()
#self.check_categorical_features()
#self.string_to_numeric()
self.process_target()
self.train_features_type = {k:v for k,v in zip(self.data.columns, self.data.dtypes)}
self.parse_process_step_config()
self.process_drop_fillna()
self.log_type_conversion()
self.update_num_fill_dict()
if DEBUG_ENABLED:
print(self.num_fill_method_dict)
self.update_cat_fill_dict()
self.create_pipeline()
self.text_pipeline(self.config)
self.apply_outlier()
if DEBUG_ENABLED:
self.log.info(self.process_method)
self.log.info(self.pipeline)
def is_target_available(self):
return (isinstance(self.target, pd.Series) and not self.target.empty) or len(self.target)
def process_target(self, operation='encode', arg=None):
if self.is_target_available():
# drop null values
self.__update_index( self.target.notna(), 'target')
if self.encode_target:
self.label_encoder = LabelEncoder()
self.target = self.label_encoder.fit_transform(self.target)
return self.label_encoder
return None
def is_target_column(self, column):
return column == self.target_name
def fill_default_steps(self):
num_fill_method = cs.get_one_true_option(self.config.get('numericalFillMethod',{}))
normalization_method = cs.get_one_true_option(self.config.get('normalization',{}),'none')
for colm in self.numeric_feature:
if num_fill_method:
self.fill_missing_value_method(colm, num_fill_method.lower())
if normalization_method:
self.fill_normalizer_method(colm, normalization_method.lower())
cat_fill_method = cs.get_one_true_option(self.config.get('categoricalFillMethod',{}))
cat_encode_method = cs.get_one_true_option(self.config.get('categoryEncoding',{}))
for colm in self.cat_feature:
if cat_fill_method:
self.fill_missing_value_method(colm, cat_fill_method.lower())
if cat_encode_method:
self.fill_encoder_value_method(colm, cat_encode_method.lower(), default=True)
def parse_process_step_config(self):
self.process_method = {}
user_provided_data_type = {}
for feat_conf in self.featureDict:
colm = feat_conf.get('feature', '')
if not self.is_target_column(colm):
if colm in self.data.columns:
user_provided_data_type[colm] = feat_conf['type']
if user_provided_data_type:
self.update_user_provided_type(user_provided_data_type)
self.fill_default_steps()
for feat_conf in self.featureDict:
colm = feat_conf.get('feature', '')
if not self.is_target_column(colm):
if colm in self.data.columns:
if feat_conf.get('fillMethod', None):
self.fill_missing_value_method(colm, feat_conf['fillMethod'].lower())
if feat_conf.get('categoryEncoding', None):
self.fill_encoder_value_method(colm, feat_conf['categoryEncoding'].lower())
if feat_conf.get('normalization', None):
self.fill_normalizer_method(colm, feat_conf['normalization'].lower())
if feat_conf.get('outlier', None):
self.fill_outlier_method(colm, feat_conf['outlier'].lower())
if feat_conf.get('outlierOperation', None):
self.fill_outlier_process(colm, feat_conf['outlierOperation'].lower())
def get_tf_idf_dimensions(self):
dim = cs.get_one_true_option(self.config.get('embeddingSize',{}).get('TF_IDF',{}), 'default')
return {'default': 300, '50d':50, '100d':100, '200d':200, '300d':300}[dim]
def get_tf_idf_output_size(self, pipe):
start_index = {}
for feat_expender in self.feature_expender:
if feat_expender:
step_name = list(feat_expender.keys())[0]
index = list(feat_expender.values())[0]
for transformer_step in pipe.transformer_list:
if transformer_step[1].steps[-1][0] in step_name:
start_index[index] = {transformer_step[1].steps[-1][0]: transformer_step[1].steps[-1][1].get_feature_names_out()}
if start_index:
for key,value in start_index.items():
for k,v in value.items():
if k == 'vectorizer':
return len(v)
return 0
def update_output_features_names(self, pipe):
columns = self.output_columns
start_index = {}
index_shifter = 0
for feat_expender in self.feature_expender:
if feat_expender:
step_name = list(feat_expender.keys())[0]
for key,value in start_index.items():
for k,v in value.items():
index_shifter += len(v)
index = list(feat_expender.values())[0]
for transformer_step in pipe.transformer_list:
if transformer_step[1].steps[-1][0] in step_name:
start_index[index + index_shifter] = {transformer_step[1].steps[-1][0]: transformer_step[1].steps[-1][1].get_feature_names_out()}
#print(start_index)
if start_index:
for key,value in start_index.items():
for k,v in value.items():
if k == 'vectorizer':
v = [f'{x}_vect' for x in v]
self.output_columns[key:key] = v
self.added_features = [*self.added_features, *v]
def text_pipeline(self, conf_json):
if self.text_feature:
from text.textProfiler import textProfiler
from text.textProfiler import textCombine
pipeList = []
text_pipe = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", self.text_feature)
], remainder="drop")),
("text_fillNa",SimpleImputer(strategy='constant', fill_value='')),
("merge_text_feature", textCombine())])
obj = textProfiler()
pipeList = obj.textProfiler(conf_json, pipeList, self.data_path)
last_step = "merge_text_feature"
for pipe_elem in pipeList:
text_pipe.steps.append((pipe_elem[0], pipe_elem[1]))
last_step = pipe_elem[0]
text_transformer = ('text_process', text_pipe)
self.pipeline.append(text_transformer)
self.feature_expender.append({last_step:len(self.output_columns)})
def create_pipeline(self):
num_pipe = {}
for k,v in self.num_fill_method_dict.items():
for k1,v1 in v.items():
if k1 and k1 != 'none':
num_pipe[f'{k}_{k1}'] = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", v1)
], remainder="drop")),
(k, self.get_num_imputer(k)),
(k1, self.get_num_scaler(k1))
])
else:
num_pipe[f'{k}_{k1}'] = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", v1)
], remainder="drop")),
(k, self.get_num_imputer(k))
])
self.output_columns.extend(v1)
cat_pipe = {}
for k,v in self.cat_fill_method_dict.items():
for k1,v1 in v.items():
cat_pipe[f'{k}_{k1}'] = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", v1)
], remainder="drop")),
(k, self.get_cat_imputer(k)),
(k1, self.get_cat_encoder(k1))
])
if k1 not in ['onehotencoding']:
self.output_columns.extend(v1)
else:
self.feature_expender.append({k1:len(self.output_columns)})
for key, pipe in num_pipe.items():
self.pipeline.append((key, pipe))
for key, pipe in cat_pipe.items():
self.pipeline.append((key, pipe))
"Drop: feature during training but replace with zero during prediction "
def process_drop_fillna(self):
drop_column = []
if 'numFill' in self.process_method.keys():
for col, method in self.process_method['numFill'].items():
if method == 'drop':
self.process_method['numFill'][col] = 'zero'
drop_column.append(col)
if 'catFill' in self.process_method.keys():
for col, method in self.process_method['catFill'].items():
if method == 'drop':
self.process_method['catFill'][col] = 'zero'
drop_column.append(col)
if drop_column:
self.data.dropna(subset=drop_column, inplace=True)
def update_num_fill_dict(self):
self.num_fill_method_dict = {}
if 'numFill' in self.process_method.keys():
for f in cs.supported_method['fillNa']['numeric']:
self.num_fill_method_dict[f] = {}
for en in cs.supported_method['normalization']:
self.num_fill_method_dict[f][en] = []
for col in self.numeric_feature:
numFillDict = self.process_method.get('numFill',{})
normalizationDict = self.process_method.get('normalization',{})
if f == numFillDict.get(col, '') and en == normalizationDict.get(col,''):
self.num_fill_method_dict[f][en].append(col)
if not self.num_fill_method_dict[f][en] :
del self.num_fill_method_dict[f][en]
if not self.num_fill_method_dict[f]:
del self.num_fill_method_dict[f]
def update_cat_fill_dict(self):
self.cat_fill_method_dict = {}
if 'catFill' in self.process_method.keys():
for f in cs.supported_method['fillNa']['categorical']:
self.cat_fill_method_dict[f] = {}
for en in cs.supported_method['categoryEncoding']:
self.cat_fill_method_dict[f][en] = []
for col in self.cat_feature:
catFillDict = self.process_method.get('catFill',{})
catEncoderDict = self.process_method.get('catEncoder',{})
if f == catFillDict.get(col, '') and en == catEncoderDict.get(col,''):
self.cat_fill_method_dict[f][en].append(col)
if not self.cat_fill_method_dict[f][en] :
del self.cat_fill_method_dict[f][en]
if not self.cat_fill_method_dict[f]:
del self.cat_fill_method_dict[f]
def __update_type(self):
self.numeric_feature = list( set(self.data.select_dtypes(include='number').columns.tolist()) - set(self.keep_unprocessed))
self.cat_feature = list( set(self.data.select_dtypes(include='category').columns.tolist()) - set(self.keep_unprocessed))
self.text_feature = list( set(self.data.select_dtypes(include='object').columns.tolist()) - set(self.keep_unprocessed))
self.datetime_feature = list( set(self.data.select_dtypes(include='datetime').columns.tolist()) - set(self.keep_unprocessed))
def update_user_provided_type(self, data_types):
allowed_types = ['numerical','categorical', 'text']
skipped_types = ['date','index']
type_mapping = {'numerical': np.dtype('float'), 'float': np.dtype('float'),'categorical': 'category', 'text':np.dtype('object'),'date':'datetime64[ns]','index': np.dtype('int64'),}
mapped_type = {k:type_mapping[v] for k,v in data_types.items() if v in allowed_types}
skipped_features = [k for k,v in data_types.items() if v in skipped_types]
if skipped_features:
self.keep_unprocessed.extend( skipped_features)
self.keep_unprocessed = list(set(self.keep_unprocessed))
self.update_type(mapped_type, 'user provided data type')
def get_type(self, as_list=False):
if as_list:
return list(self.colm_type.values())
else:
return self.colm_type
def update_type(self, data_types={}, reason=''):
invalid_features = [x for x in data_types.keys() if x not in self.data.columns]
if invalid_features:
valid_feat = list(set(data_types.keys()) - set(invalid_features))
valid_feat_type = {k:v for k,v in data_types.items() if k in valid_feat}
else:
valid_feat_type = data_types
for k,v in valid_feat_type.items():
if v != self.colm_type[k].name:
try:
self.data.astype({k:v})
self.colm_type.update({k:self.data[k].dtype})
self.type_conversion[k] = (self.colm_type[k] , v, 'Done', reason)
except:
self.type_conversion[k] = (self.colm_type[k] , v, 'Fail', reason)
if v == np.dtype('float64') and self.colm_type[k].name == 'object':
if self.check_numeric( k):
self.data[ k] = pd.to_numeric(self.data[ k], errors='coerce')
self.type_conversion[k] = (self.colm_type[k] , v, 'Done', reason)
self.force_numeric_conv.append( k)
else:
raise ValueError(f"Can not convert '{k}' feature to 'numeric' as numeric values are less than {self.config['numericFeatureRatio'] * 100}%")
self.data = self.data.astype(valid_feat_type)
self.__update_type()
def check_numeric(self, feature):
col_values = self.data[feature].copy()
col_values = pd.to_numeric(col_values, errors='coerce')
if col_values.count() >= (self.config['numericFeatureRatio'] * len(col_values)):
return True
return False
def string_to_numeric(self):
def to_number(x):
try:
return w2n.word_to_num(x)
except:
return np.nan
for col in self.text_feature:
col_values = self.data[col].copy()
col_values = pd.to_numeric(col_values, errors='coerce')
if col_values.count() >= (self.config['numericFeatureRatio'] * len(col_values)):
self.text_to_num[col] = 'float64'
self.wordToNumericFeatures.append(col)
if self.text_to_num:
columns = list(self.text_to_num.keys())
self.data[columns] = self.data[columns].applymap(to_number)
self.update_type(self.text_to_num)
self.log.info('----------- Inspecting Features -----------')
for col in self.text_feature:
self.log.info(f'-------> Feature : {col}')
if col in self.text_to_num:
self.log.info('----------> Numeric Status :Yes')
self.log.info('----------> Data Type Converting to numeric :Yes')
else:
self.log.info('----------> Numeric Status :No')
self.log.info(f'\\nStatus:- |... Feature inspection done for numeric data: {len(self.text_to_num)} feature(s) converted to numeric')
self.log.info(f'\\nStatus:- |... Feature word to numeric treatment done: {self.text_to_num}')
self.log.info('----------- Inspecting Features End -----------')
def check_categorical_features(self):
num_data = self.data.select_dtypes(include='number')
num_data_unique = num_data.nunique()
num_to_cat_col = {}
for i, value in enumerate(num_data_unique):
if value < self.config['categoryMaxLabel']:
num_to_cat_col[num_data_unique.index[i]] = 'category'
if num_to_cat_col:
self.update_type(num_to_cat_col, 'numerical to categorical')
str_to_cat_col = {}
str_data = self.data.select_dtypes(include='object')
str_data_unique = str_data.nunique()
for i, value in enumerate(str_data_unique):
if value < self.config['categoryMaxLabel']:
str_to_cat_col[str_data_unique.index[i]] = 'category'
for colm in str_data.columns:
if self.data[colm].str.len().max() < cs.default_config['str_to_cat_len_max']:
str_to_cat_col[colm] = 'category'
if str_to_cat_col:
self.update_type(str_to_cat_col, 'text to categorical')
def drop_features(self, features=[], reason='unspecified'):
if isinstance(features, str):
features = [features]
feat_to_remove = [x for x in features if x in self.data.columns]
if feat_to_remove:
self.data.drop(feat_to_remove, axis=1, inplace=True)
for feat in feat_to_remove:
self.dropped_features[feat] = reason
self.log_drop_feature(feat_to_remove, reason)
self.__update_type()
def __update_index(self, indices, reason=''):
if isinstance(indices, (pd.Series, np.ndarray)) and len(indices) == len(self.data):
if not indices.all():
self.data = self.data[indices]
if self.is_target_available():
self.target = self.target[indices]
self.log_update_index((indices == False).sum(), reason)
def dropna(self):
self.data.dropna(how='all',inplace=True)
if self.is_target_available():
self.target = self.target[self.data.index]
def drop_duplicate(self):
index = self.data.duplicated(keep='first')
self.__update_index( ~index, reason='duplicate')
def log_drop_feature(self, columns, reason):
self.log.info(f'---------- Dropping {reason} features ----------')
self.log.info(f'\\nStatus:- |... {reason} feature treatment done: {len(columns)} {reason} feature(s) found')
self.log.info(f'-------> Drop Features: {columns}')
self.log.info(f'Data Frame Shape After Dropping (Rows,Columns): {self.data.shape}')
def log_update_index(self,count, reason):
if count:
if reason == 'target':
self.log.info('-------> Null Target Rows Drop:')
self.log.info(f'-------> Dropped rows count: {count}')
elif reason == 'duplicate':
self.log.info('-------> Duplicate Rows Drop:')
self.log.info(f'-------> Dropped rows count: {count}')
elif reason == 'outlier':
self.log.info(f'-------> Dropped rows count: {count}')
self.log.info('Status:- |... Outlier treatment done')
self.log.info(f'-------> Data Frame Shape After Dropping samples(Rows,Columns): {self.data.shape}')
def log_normalization(self):
if self.process_method.get('normalization', None):
self.log.info(f'\\nStatus:- !... Normalization treatment done')
for method in cs.supported_method['normalization']:
cols = []
for col, m in self.process_method['normalization'].items():
if m == method:
cols.append(col)
if cols and method != 'none':
self.log.info(f'Running {method} on features: {cols}')
def log_numerical_fill(self):
if self.process_method.get('numFill', None):
self.log.info(f'\\nStatus:- !... Fillna for numeric feature done')
for method in cs.supported_method['fillNa']['numeric']:
cols = []
for col, m in self.process_method['numFill'].items():
if m == method:
cols.append(col)
if cols:
self.log.info(f'-------> Running {method} on features: {cols}')
def log_categorical_fill(self):
if self.process_method.get('catFill', None):
self.log.info(f'\\nStatus:- !... FillNa for categorical feature done')
for method in cs.supported_method['fillNa']['categorical']:
cols = []
for col, m in self.process_method['catFill'].items():
if m == method:
cols.append(col)
if cols:
self.log.info(f'-------> Running {method} on features: {cols}')
def remove_constant_feature(self):
unique_values = self.data.nunique()
constant_features = []
for i, value in enumerate(unique_values):
if value == 1:
constant_features.append(unique_values.index[i])
if constant_features:
self.drop_features(constant_features, "constant")
def remove_empty_feature(self, misval_ratio=1.0):
missing_ratio = self.data.isnull().sum() / len(self.data)
missing_ratio = {k:v for k,v in zip(self.data.columns, missing_ratio)}
empty_features = [k for k,v in missing_ratio.items() if v > misval_ratio]
if empty_features:
self.drop_features(empty_features, "empty")
def remove_index_features(self):
index_feature = []
for feat in self.numeric_feature:
if self.data[feat].nunique() == len(self.data):
#if (self.data[feat].sum()- sum(self.data.index) == (self.data.iloc[0][feat]-self.data.index[0])*len(self.data)):
# index feature can be time based
count = (self.data[feat] - self.data[feat].shift() == 1).sum()
if len(self.data) - count == 1:
index_feature.append(feat)
self.drop_features(index_feature, "index")
def fill_missing_value_method(self, colm, method):
if colm in self.numeric_feature:
if method in cs.supported_method['fillNa']['numeric']:
if 'numFill' not in self.process_method.keys():
self.process_method['numFill'] = {}
if method == 'na' and self.process_method['numFill'].get(colm, None):
pass # don't overwrite
else:
self.process_method['numFill'][colm] = method
if colm in self.cat_feature:
if method in cs.supported_method['fillNa']['categorical']:
if 'catFill' not in self.process_method.keys():
self.process_method['catFill'] = {}
if method == 'na' and self.process_method['catFill'].get(colm, None):
pass
else:
self.process_method['catFill'][colm] = method
def check_encoding_method(self, method, colm,default=False):
if not self.is_target_available() and (method.lower() == list(cs.target_encoding_method_change.keys())[0]):
method = cs.target_encoding_method_change[method.lower()]
if default:
self.log.info(f"Applying Label encoding instead of Target encoding on feature '{colm}' as target feature is not present")
return method
def fill_encoder_value_method(self,colm, method, default=False):
if colm in self.cat_feature:
if method.lower() in cs.supported_method['categoryEncoding']:
if 'catEncoder' not in self.process_method.keys():
self.process_method['catEncoder'] = {}
if method == 'na' and self.process_method['catEncoder'].get(colm, None):
pass
else:
self.process_method['catEncoder'][colm] = self.check_encoding_method(method, colm,default)
else:
self.log.info(f"-------> categorical encoding method '{method}' is not supported. supported methods are {cs.supported_method['categoryEncoding']}")
def fill_normalizer_method(self,colm, method):
if colm in self.numeric_feature:
if method in cs.supported_method['normalization']:
if 'normalization' not in self.process_method.keys():
self.process_method['normalization'] = {}
if (method == 'na' or method == 'none') and self.process_method['normalization'].get(colm, None):
pass
else:
self.process_method['normalization'][colm] = method
else:
self.log.info(f"-------> Normalization method '{method}' is not supported. supported methods are {cs.supported_method['normalization']}")
def apply_outlier(self):
inlier_indice = np.array([True] * len(self.data))
if self.process_method.get('outlier', None):
self.log.info('-------> Feature wise outlier detection:')
for k,v in self.process_method['outlier'].items():
if k in self.numeric_feature:
if v == 'iqr':
index = cs.findiqrOutlier(self.data[k])
elif v == 'zscore':
index = cs.findzscoreOutlier(self.data[k])
elif v == 'disable':
index = None
if k in self.process_method['outlierOperation'].keys():
if self.process_method['outlierOperation'][k] == 'dropdata':
inlier_indice = np.logical_and(inlier_indice, index)
elif self.process_method['outlierOperation'][k] == 'average':
mean = self.data[k].mean()
index = ~index
self.data.loc[index,[k]] = mean
self.log.info(f'-------> {k}: Replaced by Mean {mean}: total replacement {index.sum()}')
elif self.process_method['outlierOperation'][k] == 'nochange' and v != 'disable':
self.log.info(f'-------> Total outliers in "{k}": {(~index).sum()}')
if self.config.get('outlierDetection',None):
if self.config['outlierDetection'].get('IsolationForest','False') == 'True':
if self.numeric_feature:
index = cs.findiforestOutlier(self.data[self.numeric_feature])
inlier_indice = np.logical_and(inlier_indice, index)
self.log.info(f'-------> Numeric feature based Outlier detection(IsolationForest):')
if inlier_indice.sum() != len(self.data):
self.__update_index(inlier_indice, 'outlier')
def fill_outlier_method(self,colm, method):
if colm in self.numeric_feature:
if method in cs.supported_method['outlier_column_wise']:
if 'outlier' not in self.process_method.keys():
self.process_method['outlier'] = {}
if method not in ['Disable', 'na']:
self.process_method['outlier'][colm] = method
else:
self.log.info(f"-------> outlier detection method '{method}' is not supported for column wise. supported methods are {cs.supported_method['outlier_column_wise']}")
def fill_outlier_process(self,colm, method):
if colm in self.numeric_feature:
if method in cs.supported_method['outlierOperation']:
if 'outlierOperation' not in self.process_method.keys():
self.process_method['outlierOperation'] = {}
self.process_method['outlierOperation'][colm] = method
else:
self.log.info(f"-------> outlier process method '{method}' is not supported for column wise. supported methods are {cs.supported_method['outlierOperation']}")
def get_cat_imputer(self,method):
if method == 'mode':
return SimpleImputer(strategy='most_frequent')
elif method == 'zero':
return SimpleImputer(strategy='constant', fill_value=0)
def get_cat_encoder(self,method):
if method == 'labelencoding':
return OrdinalEncoder()
elif method == 'onehotencoding':
return OneHotEncoder(sparse=False,handle_unknown="ignore")
elif method == 'targetencoding':
if not self.is_target_available():
raise ValueError('Can not apply Target Encoding when target feature is not present')
return TargetEncoder()
def get_num_imputer(self,method):
if method == 'mode':
return SimpleImputer(strategy='most_frequent')
elif method == 'mean':
return SimpleImputer(strategy='mean')
elif method == 'median':
return SimpleImputer(strategy='median')
elif method == 'knnimputer':
return KNNImputer()
elif method == 'zero':
return SimpleImputer(strategy='constant', fill_value=0)
def get_num_scaler(self,method):
if method == 'minmax':
return MinMaxScaler()
elif method == 'standardscaler':
return StandardScaler()
elif method == 'lognormal':
return PowerTransformer(method='yeo-johnson', standardize=False)
def recommenderStartProfiler(self,modelFeatures):
return cs.recommenderStartProfiler(self,modelFeatures)
def folderPreprocessing(self,folderlocation,folderdetails,deployLocation):
return cs.folderPreprocessing(self,folderlocation,folderdetails,deployLocation)
def textSimilarityStartProfiler(self, doc_col_1, doc_col_2):
return cs.textSimilarityStartProfiler(self, doc_col_1, doc_col_2)
def get_conversion_method(self):
return cs.get_one_true_option(self.config.get('textConversionMethod','')).lower()
def set_features(features,profiler=None):
return cs.set_features(features,profiler)
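# --- Illustrative sketch (not part of the original module) ---
# The profiler above assembles named (name, Pipeline) steps, each guarded by a
# ColumnTransformer "selector", and fits them together as a FeatureUnion (the `pipe`
# whose transformer_list is inspected during training). The guarded demo below is a
# minimal, self-contained mirror of that pattern on dummy data; every column name and
# value is hypothetical.
if __name__ == '__main__':
    import pandas as pd
    from sklearn.pipeline import Pipeline, FeatureUnion
    from sklearn.compose import ColumnTransformer
    from sklearn.impute import SimpleImputer
    from sklearn.preprocessing import MinMaxScaler
    demo_df = pd.DataFrame({'age': [25.0, None, 40.0], 'salary': [50000.0, 60000.0, None]})
    num_pipe = Pipeline([
        ('selector', ColumnTransformer([("selector", "passthrough", ['age', 'salary'])], remainder="drop")),
        ('mean', SimpleImputer(strategy='mean')),      # impute missing numerics with the mean
        ('minmax', MinMaxScaler())])                   # then scale to [0, 1]
    demo_union = FeatureUnion([('mean_minmax', num_pipe)])
    print(demo_union.fit_transform(demo_df))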
<s> import os
import sys
import numpy as np
import scipy
import scipy.stats
import pandas as pd
from pathlib import Path
default_config = {
'misValueRatio': '1.0',
'numericFeatureRatio': '1.0',
'categoryMaxLabel': '20',
'str_to_cat_len_max': 10
}
target_encoding_method_change = {'targetencoding': 'labelencoding'}
supported_method = {
'fillNa':
{
'categorical' : ['mode','zero','na'],
'numeric' : ['median','mean','knnimputer','zero','drop','na'],
},
'categoryEncoding': ['labelencoding','targetencoding','onehotencoding','na','none'],
'normalization': ['standardscaler','minmax','lognormal', 'na','none'],
'outlier_column_wise': ['iqr','zscore', 'disable', 'na'],
'outlierOperation': ['dropdata', 'average', 'nochange']
}
def findiqrOutlier(df):
Q1 = df.quantile(0.25)
Q3 = df.quantile(0.75)
IQR = Q3 - Q1
index = ~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR)))
return index
def findzscoreOutlier(df):
z = np.abs(scipy.stats.zscore(df))
index = (z < 3)
return index
def findiforestOutlier(df):
from sklearn.ensemble import IsolationForest
isolation_forest = IsolationForest(n_estimators=100)
isolation_forest.fit(df)
y_pred_train = isolation_forest.predict(df)
return y_pred_train == 1
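# --- Illustrative sketch (not part of the original module) ---
# The profiler's apply_outlier() combines the boolean inlier masks returned by the
# helpers above with np.logical_and before dropping rows. A minimal, hedged example of
# that combination on dummy data (all values are hypothetical):
if __name__ == '__main__':
    import scipy.stats  # ensure the stats submodule is available for findzscoreOutlier
    demo = pd.Series([10.0, 11.0, 12.0, 500.0, 13.0, 9.0, 11.0, 10.0, 12.0, 11.0])
    inliers = np.logical_and(findiqrOutlier(demo), findzscoreOutlier(demo))
    inliers = np.logical_and(inliers, findiforestOutlier(demo.to_frame()))
    print('rows kept:', int(inliers.sum()), 'of', len(demo))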
def get_one_true_option(d, default_value=None):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
def get_boolean(value):
if (isinstance(value, str) and value.lower() == 'true') or (isinstance(value, bool) and value == True):
return True
else:
return False
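# --- Illustrative sketch (not part of the original module) ---
# get_one_true_option() returns the key whose value is truthy ('True'/True); the
# profiler uses it to pick the single enabled option from config blocks such as
# 'numericalFillMethod' or 'normalization'. The sample config values below are hypothetical.
if __name__ == '__main__':
    fill_cfg = {'Mean': 'False', 'Median': 'True', 'KNNImputer': 'False'}
    print(get_one_true_option(fill_cfg))            # -> 'Median'
    print(get_one_true_option({}, 'none'))          # -> 'none' (default)
    print(get_boolean('True'), get_boolean('no'))   # -> True False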
def recommenderStartProfiler(self,modelFeatures):
try:
self.log.info('----------> FillNA:0')
self.data = self.data.fillna(value=0)
self.log.info('Status:- !... Missing value treatment done')
self.log.info('----------> Remove Empty Row')
self.data = self.data.dropna(axis=0,how='all')
self.log.info('Status:- !... Empty feature treatment done')
userId,itemId,rating = modelFeatures.split(',')
self.data[itemId] = self.data[itemId].astype(np.int32)
self.data[userId] = self.data[userId].astype(np.int32)
self.data[rating] = self.data[rating].astype(np.float32)
return self.data
except Exception as inst:
self.log.info("Error: dataProfiler failed "+str(inst))
return(self.data)
def folderPreprocessing(self,folderlocation,folderdetails,deployLocation):
try:
dataset_directory = Path(folderlocation)
dataset_csv_file = dataset_directory/folderdetails['label_csv_file_name']
tfrecord_directory = Path(deployLocation)/'Video_TFRecord'
from savp import PreprocessSAVP
import csv
csvfile = open(dataset_csv_file, newline='')
csv_reader = csv.DictReader(csvfile)
PreprocessSAVP(dataset_directory,csv_reader,tfrecord_directory)
dataColumns = list(self.data.columns)
VideoProcessing = True
return dataColumns,VideoProcessing,tfrecord_directory
except Exception as inst:
self.log.info("Error: dataProfiler failed "+str(inst))
def textSimilarityStartProfiler(self, doc_col_1, doc_col_2):
import os
try:
features = [doc_col_1, doc_col_2]
pipe = None
dataColumns = list(self.data.columns)
self.numofCols = self.data.shape[1]
self.numOfRows = self.data.shape[0]
from transformations.textProfiler import textProfiler
self.log.info('-------> Execute Fill NA With Empty String')
self.data = self.data.fillna(value=" ")
self.log.info('Status:- |... Missing value treatment done')
self.data[doc_col_1] = textProfiler().textCleaning(self.data[doc_col_1])
self.data[doc_col_2] = textProfiler().textCleaning(self.data[doc_col_2])
self.log.info('-------> Concatenate: ' + doc_col_1 + ' ' + doc_col_2)
self.data['text'] = self.data[[doc_col_1, doc_col_2]].apply(lambda row: ' '.join(row.values.astype(str)), axis=1)
from tensorflow.keras.preprocessing.text import Tokenizer
pipe = Tokenizer()
pipe.fit_on_texts(self.data['text'].values)
self.log.info('-------> Tokenizer: Fit on Concatenate Field')
self.log.info('Status:- |... Tokenizer the text')
self.data[doc_col_1] = self.data[doc_col_1].astype(str)
self.data[doc_col_2] = self.data[doc_col_2].astype(str)
return (self.data, pipe, self.target_name, features)
except Exception as inst:
self.log.info("StartProfiler failed " + str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
def set_features(features,profiler=None):
if profiler:
features = [x for x in features if x not in profiler.added_features]
return features + profiler.text_feature
return features<s> # -*- coding: utf-8 -*-
"""
Created on Wed Jun 15 14:36:11 2022
@author: @aionteam
"""
import flwr
import flwr as fl
import tensorflow as tf
from typing import Any, Callable, Dict, List, Optional, Tuple
import utils
from sklearn.metrics import log_loss
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, mean_absolute_error,r2_score
from typing import Dict
import numpy as np
import logging
import os
# import sys
from flwr.common.logger import log
from logging import INFO
import pickle as pkl
from flwr.server.client_proxy import ClientProxy
import dl_model
from sklearn.preprocessing import StandardScaler
import pandas as pd
## The import below can be used when AION-specific gRPC communication is used.
# from aionflgrpcserver import aionflgrpcserver
# Make TensorFlow logs less verbose
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
os.environ["GRPC_VERBOSITY"] = "debug"
# """ AION Federated Learning Server. Geting weights from clients, aggregate the weights by FedAvg algorithm and update the client model."""
class flserver():
def __init__(self,df,confdata):
self.log = logging.getLogger('AION')
## Below params will be used later
self.confparams=confdata
self.df=df
self.fl_round=0
print("Inside flserver init func")
## Flower server number of rounds for fl model update (clients-server)
def fit_round(self,rnd: int) -> Dict:
"""Send round number to client."""
self.fl_round=rnd
log(INFO, "===========================")
self.log.info("federated learning round: "+str(rnd))
log(INFO, "federated learning round: %s ",str(rnd))
log(INFO, "===========================")
# print(f"federated learning round: {rnd}")
return {"rnd": rnd}
def fit_config(self,rnd: int) -> Dict:
"""Send round number to client."""
self.round_count = rnd
log(INFO, "===========================")
log(INFO, "Starting round %s ...",str(rnd))
log(INFO, "===========================")
model_hyperparams = self.confparams["model_hyperparams"]
batch_size = model_hyperparams["batch_size"]
local_epochs = model_hyperparams["epochs"]
config = {
"batch_size": int(batch_size),
# "local_epochs": 1 if rnd < 2 else 2,
"local_epochs": int(local_epochs),
"rnd": rnd,
}
return config
def evaluate_config(self, rnd: int):
model_hyperparams = self.confparams["model_hyperparams"]
val_batch_size = model_hyperparams["batch_size"]
# val_steps = 5 if rnd < 4 else 10
return {"val_batch_size": int(val_batch_size)}
## Loading configuration parameters
def configload(self,confparams):
try:
data=confparams
server_ip=str(data["server_IP"])
server_port=str(data["server_port"])
model_name=str(data["model_name"])
num_clients=int(data["min_available_clients"])
min_fit_clients=int(data["min_fit_clients"])
num_train_round=int(data["fl_round"])
data_location=str(data["data_location"])
model_params=data["model_hyperparams"]
problem_type=data["problem_type"]
server_address=f"{server_ip}:{server_port}"
# model_location=str(data["model_store"])
model_version=str(data["version"])
selected_feature=data["selected_feature"]
if (type(selected_feature) is str):
selected_feature=selected_feature.split(',')
target_feature=data["target_feature"]
evaluation_required=data["evaluation_required"]
self.log.info("Federated Learning <Server IP:Port> "+str(server_address))
except Exception as e:
log(INFO, "Reading server config file issue. Err.Msg: %s ",str(e))
return server_address,model_name,num_clients,min_fit_clients,num_train_round,data_location,model_params,problem_type,model_version,selected_feature,target_feature,evaluation_required
## Save the final model
def model_save(self,model,model_name,problem_type,version):
cwd = os.path.abspath(os.path.dirname(__file__))
model_location=os.path.join(cwd, 'models')
version=str(version)
model_name=self.confparams["model_name"]
if (model_name.lower() == "deeplearning"):
file_name = model_name + '_' +problem_type+'_'+version+ ".h5"
else:
file_name = model_name + '_' +problem_type+'_'+version+".sav"
saved_model=os.path.normpath(os.path.join(model_location,file_name))
self.log.info("saved_model path: "+str(saved_model))
try:
with open (saved_model,'wb') as f:
pkl.dump(model,f)
return True
except Exception as e:
self.log.info("fl server model save error. Error Msg: "+str(e))
return False
## Load the model; not used now. If the user wants to use an AION-trained model for server-side evaluation, use this function.
def model_load(self, path):
model_name=self.confparams["model_name"]
if (model_name.lower() == "deeplearning"):
loaded_model = tf.keras.models.load_model(path)
else:
loaded_model = pkl.load(open(path, 'rb'))
return loaded_model
# For normal ML models, get_eval_fn evaluates each round's result against the server's own dataset. It is optional; without it, the federated server aggregates (FedAvg) the client weights and returns the update to clients without evaluation.
def get_eval_fn(self,model,X,y,model_name,model_version):
"""Return an evaluation function for server-side evaluation."""
self.log.info("X_eval: \\n"+str(X.shape))
self.log.info("y_eval: \\n"+str(y.shape))
# scaler = StandardScaler()
# X_scaled = scaler.fit_transform(X)
# y = pd.get_dummies(y)
# y_class = None
def evaluate(server_round: int,
parameters: fl.common.NDArrays,
config: Dict[str, fl.common.Scalar],):
# self.log.info("server side fedavg weights \\n "+str(parameters))
try:
problem_type=self.confparams["problem_type"]
# if (self.model_name.lower() == 'logisticregression' ):
# loss = log_loss(y, model.predict_proba(X))
# else:
# loss = log_loss(y, model.predict(X))
if (problem_type.lower() == 'classification'):
if (model_name.lower() == 'logisticregression' ):
utils.set_model_params(model, parameters)
loss = log_loss(y, model.predict_proba(X))
# loss = log_loss(y, model.predict_proba(X))
accuracy = model.score(X, y)
log(INFO, "Server evaluation FL Round: %s processed Weights. -- Loss: %s, -- Accuracy: %s ",str(self.fl_round),str(loss), str(accuracy))
self.log.info("Accuracy: "+str(accuracy))
self.log.info("model coefficients: "+str(model.coef_))
self.log.info("model intercept: "+str(model.intercept_))
problem_type=self.confparams["problem_type"]
self.model_save(model,model_name,problem_type,model_version)
return loss, {"accuracy": accuracy}
else:
if (model_name.lower() == 'linearregression' ):
print(model, type(model))
print(model.get_params)
# rmse = mean_squared_error(y, model.predict(X), square=True)
y_pred = model.predict(X)
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
loss = rmse
results = {
"mean_absolute_error": mae,
"mean_squared_error": mse,
"root_mean_squared_error": rmse,
"r2":r2,
}
# accuracy=r2
log(INFO, "Server evaluation FL Round: %s processed Weights. -- Loss: %s, -- metrics: %s ",str(self.fl_round),str(rmse), str(results))
self.log.info("model coefficients: "+str(model.coef_))
self.log.info("model intercept: "+str(model.intercept_))
self.model_save(model,model_name,problem_type,model_version)
# return loss, len(X), results
return loss, results
except Exception as e:
log(INFO, "evaluate error msg: %s ",str(e))
return evaluate
# For deep learning models, get_eval_fn_dl evaluates each round's result against the server's own dataset. It is optional; without it, the federated server aggregates (FedAvg) the client weights and returns the update to clients without evaluation.
def get_eval_fn_dl(self, model,X,y,model_name,model_version):
try:
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# y = pd.get_dummies(y)
y_class = None
def evaluate(
server_round: int,
weights: fl.common.NDArrays,
config: Dict[str, fl.common.Scalar],
) -> Optional[Tuple[float, Dict[str, fl.common.Scalar]]]:
# Update model with the latest parameters
model.set_weights(weights)
problem_type = self.confparams["problem_type"]
self.model_save(model, model_name,problem_type, model_version)
if model_name == 'deeplearning':
if problem_type == 'regression':
loss, mean_absolute_error, mean_squared_error = model.evaluate(X_scaled,
y,
verbose=1)
y_pred = model.predict(X_scaled)
from sklearn import metrics
root_mean_squared_error = np.sqrt(metrics.mean_squared_error(y, y_pred))
log(INFO, "global model mean_absolute_error: %f ",mean_absolute_error)
log(INFO, "global model mean_squared_error: %f ",mean_squared_error)
log(INFO, "global model root_mean_squared_error: %f ",root_mean_squared_error)
return loss, {"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"root_mean_squared_error": root_mean_squared_error}
if problem_type == 'classification':
y_class = pd.get_dummies(y)
loss, accuracy = model.evaluate(X_scaled, y_class, verbose=1)
log(INFO, "global model accuracy: %f ",round(accuracy * 100, 2))
log(INFO, "global model loss: %f ", round(loss, 2))
return loss, {"accuracy": accuracy}
except Exception as e:
log(INFO, "get_eval_fn_dl error: %s ",str(e))
return evaluate
""" Below part is the aion specific grpc functions. To start the grpc server and client. Currently below modules are not used. """
# def callaiongrpcserver(self):
# agrpcobj = aionflgrpcserver()
# status=agrpcobj.startgrpcerver()
# print("server grpc start status: \\t",status)
# return status
# def stopaiongrpcserver(self):
# agrpcobj = aionflgrpcserver()
# status=agrpcobj.shutserver()
# print("server grpc stop status: \\t",status)
# return status
## This function is called from aionflmain.py to run the server.
## Getting flower fl strategy
def get_strategy(self,min_fit_clients,num_clients,eval_fn,on_fit_config_fn,on_evaluate_config_fn):
strategy = fl.server.strategy.FedAvg(
min_fit_clients=min_fit_clients,
min_available_clients=num_clients,
evaluate_fn=eval_fn,
on_fit_config_fn=on_fit_config_fn,
on_evaluate_config_fn=on_evaluate_config_fn,
# initial_parameters=fl.common.weights_to_parameters(model.get_weights()),
)
return strategy
def runFLServer(self):
try:
server_address,model_name,num_clients,min_fit_clients,num_train_round,data_location,model_params,problem_type,model_version,selected_feature,target_feature,evaluation_required = self.configload(self.confparams)
df = self.df
if (evaluation_required.lower() == 'true'):
## One more check for NaN/Inf occurrence in the dataframe
df =df[~df.isin([np.nan, np.inf, -np.inf]).any(axis=1)]
## Remove index if passed.
df=df.reset_index(drop=True)
y=df[target_feature]
X=df[selected_feature]
if (problem_type.lower() == "classification"):
if (model_name.lower() == "logisticregression"):
#n_classes = df[target_feature].nunique()
no_classes = len(df.groupby(target_feature).count())
no_features=len(selected_feature)
self.log.info("no_classes: "+str(no_classes))
self.log.info("no_features: "+str(no_features))
modelName="logisticregression"
try:
model = LogisticRegression(**model_params, warm_start=True)
except Exception as e:
self.log.info("LR model error: \\n"+str(e))
status=utils.setmodelName(modelName)
utils.set_initial_params(model,no_classes,no_features)
eval_fn=self.get_eval_fn(model,X,y,model_name,model_version)
on_fit_config_fn=self.fit_round
on_evaluate_config_fn=None
min_fit_clients=2
strategy = self.get_strategy(min_fit_clients,num_clients,eval_fn,on_fit_config_fn,on_evaluate_config_fn)
elif (model_name.lower() == "deeplearning"):
# model_hyperparams = self.confparams["model_hyperparams"]
optimizer = model_params["optimizer"]
loss_func = model_params["losses"]
act_func = model_params["activation"]
last_act_func = model_params["last_activation"]
input_shape = X.shape[1] # len(selected_feature)
output_shape = len(y.value_counts())
model = None
if output_shape == 2:
if last_act_func == "sigmoid" and loss_func == "binary_crossentropy":
model = dl_model.dl_binary_classification(input_shape, output_shape,
optimizer, loss_func,
act_func, last_act_func)
elif last_act_func == "softmax" and loss_func == "categorical_crossentropy":
model = dl_model.dl_binary_classification(input_shape, output_shape,
optimizer, loss_func,
act_func, last_act_func)
else:
model = dl_model.dl_multiClass_classification(input_shape,
output_shape, optimizer, loss_func,
act_func, last_act_func)
print(model.summary())
eval_fn=self.get_eval_fn_dl(model,X,y,model_name,model_version)
on_fit_config_fn=self.fit_config
on_evaluate_config_fn=self.evaluate_config
strategy = self.get_strategy(min_fit_clients,num_clients,eval_fn,on_fit_config_fn,on_evaluate_config_fn)
elif(problem_type.lower() == "regression"):
if (model_name.lower() == "linearregression"):
model=LinearRegression(**model_params)
status=utils.setmodelName(model_name)
utils.set_initial_params_reg(model,X.shape[0],len(selected_feature))
# utils.set_initial_params_reg(model,X.shape[0],X.shape[1])
eval_fn=self.get_eval_fn(model,X,y,model_name,model_version)
on_fit_config_fn=self.fit_round
on_evaluate_config_fn=None
min_fit_clients=2
strategy = self.get_strategy(min_fit_clients,num_clients,eval_fn,on_fit_config_fn,on_evaluate_config_fn)
elif(model_name.lower() == "deeplearning"):
input_shape = X.shape[1] # len(selected_feature)
output_shape = len(y.value_counts())
optimizer = model_params["optimizer"]
loss_func = model_params["losses"]
act_func = model_params["activation"]
model = None
model = dl_model.dl_regression_model(input_shape, 1,
optimizer, loss_func, act_func)
eval_fn=self.get_eval_fn_dl(model,X,y,model_name,model_version)
on_fit_config_fn=self.fit_config
on_evaluate_config_fn=self.evaluate_config
strategy = self.get_strategy(min_fit_clients,num_clients,eval_fn,on_fit_config_fn,on_evaluate_config_fn)
elif (evaluation_required.lower() == 'false'):
eval_fn=None
if (model_name.lower() == "deeplearning"):
# min_fit_clients =int( model_params["min_fit_clients"])
on_fit_config_fn=self.fit_config
on_evaluate_config_fn=self.evaluate_config
strategy = self.get_strategy(min_fit_clients,num_clients,eval_fn,on_fit_config_fn,on_evaluate_config_fn)
else:
min_fit_clients=0
on_fit_config_fn=self.fit_round
on_evaluate_config_fn=None
# strategy = self.get_strategy(min_fit_clients,num_clients,eval_fn,on_fit_config_fn,on_evaluate_config_fn)
strategy = fl.server.strategy.FedAvg(
min_available_clients=num_clients,
evaluate_fn=None,
on_fit_config_fn=self.fit_round,)
else:
log(INFO, "Please opt server evaluation as True or False in server configuration file.")
log(INFO, "Federated learning Server started at @: %s ",str(server_address))
server_rnd=1
while (1):
try:
fl.server.start_server(server_address=server_address, strategy=strategy, config=fl.server.ServerConfig(num_rounds=num_train_round))# config={"num_rounds": num_train_round})#config=fl.server.ServerConfig(num_rounds=3) #,force_final_distributed_eval=True)
except Exception as e:
log(INFO, "Server exception: %s ",str(e))
log(INFO, "AION federated learning server completed for execution cycle: %s ",str(server_rnd))
# Evaluate the final trained model
server_rnd+=1
log(INFO, "AION federated learning server execution successfully completed. Please check the log file for more information.")
return True
except Exception as e:
self.log.info("AION Federated Learning Server run error. Error Msg: "+str(e))
log(INFO, "Server not executing, err.msg: %s ",str(e))
return False
# Start Flower server for n rounds of federated learning
# if __name__ == "__main__":
# ''' Testing purpose code '''
# super_obj=flserver1()
# json_file=sys.argv[1]
# super_obj.log.info("User json_file: \\n"+str(json_file))
# # configfile=None
# server_address,model_name,num_clients,num_train_round,data_location,model_version,model_version,selected_feature,target_feature = super_obj.configload(super_obj.confparams)
# df = pd.read_csv(data_location)
# # df=super_obj.df
# df =df[~df.isin([np.nan, np.inf, -np.inf]).any(1)]
# df=df.reset_index(drop=True)
# y=df[target_feature]
# X = df.drop(target_feature, axis=1)
# no_classes = len(df.groupby(target_feature).count())
# no_features=len(selected_feature.split(','))
# print("no_classes: \\n",no_classes)
# print("no_features: \\n",no_features)
# # num_classes = y_train.apply(pd.Series.nunique)
# if (model_name.lower() == "logisticregression"):
# modelName="logisticregression"
# model = LogisticRegression(penalty="l2",max_iter=10, warm_start=True)
# ## May be used in future (model load for server side eval)
# # model=super_obj.model_load(model_location)
# status=utils.setmodelName(modelName)
# utils.set_initial_params(model,no_classes,no_features)
# strategy = fl.server.strategy.FedAvg(
# min_available_clients=num_clients,
# eval_fn=super_obj.get_eval_fn(model,X,y),
# on_fit_config_fn=super_obj.fit_round,)
# # super_obj.log.info("Stating federated learning server.....\\n")
# log(INFO, "Stating AION federated learning server.....")
# fl.server.start_server(server_address, strategy=strategy, config={"num_rounds": num_train_round})
# # super_obj.log.info("federated learning server execution completed.\\n")
# log(INFO, "AION federated learning server execution completed.....")
<s> import tensorflow as tf
def dl_regression_model(input_shape, output_shape,
optimizer, loss_func, act_func):
inputs = tf.keras.Input(shape=(input_shape,))
x = tf.keras.layers.Dense(64,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(inputs)
x = tf.keras.layers.Dense(32,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(x)
x = tf.keras.layers.Dense(16,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(x)
x = tf.keras.layers.Dense(8,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(x)
outputs = tf.keras.layers.Dense(output_shape,
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(loss=loss_func ,
optimizer=optimizer,
metrics=["mean_absolute_error",
"mean_squared_error",
])
return model
def dl_multiClass_classification(input_shape, output_shape,
optimizer, loss_func, act_func, last_act_func):
inputs = tf.keras.Input(shape=(input_shape,))
x = tf.keras.layers.Dense(64,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(inputs)
x = tf.keras.layers.Dense(32,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(x)
x = tf.keras.layers.Dense(16,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(x)
x = tf.keras.layers.Dense(8,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(x)
outputs = tf.keras.layers.Dense(output_shape,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=last_act_func)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer, loss_func, metrics=["accuracy"])
return model
def dl_binary_classification(input_shape, output_shape,
optimizer, loss_func, act_func, last_act_func):
inputs = tf.keras.Input(shape=(input_shape,))
x = tf.keras.layers.Dense(64,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(inputs)
x = tf.keras.layers.Dense(32,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(x)
x = tf.keras.layers.Dense(16,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(x)
x = tf.keras.layers.Dense(8,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=act_func)(x)
outputs = tf.keras.layers.Dense(output_shape,
kernel_initializer='he_normal',
bias_initializer='zeros',
activation=last_act_func)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer, loss_func,
metrics=["accuracy"])
return model
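# --- Illustrative sketch (not part of the original module) ---
# The three builders above share the same hidden stack and differ only in the output
# layer and compile metrics. The guarded demo below builds the binary classifier with
# commonly used hyperparameter values (illustrative choices, not prescribed by this
# module) and prints the resulting layer summary.
if __name__ == '__main__':
    demo_model = dl_binary_classification(
        input_shape=10, output_shape=1,
        optimizer='adam', loss_func='binary_crossentropy',
        act_func='relu', last_act_func='sigmoid')
    demo_model.summary()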
<s><s> from typing import Tuple, Union, List
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from flwr.common.logger import log
from logging import INFO
XY = Tuple[np.ndarray, np.ndarray]
Dataset = Tuple[XY, XY]
LogRegParams = Union[XY, Tuple[np.ndarray]]
XYList = List[XY]
modelUsed=None
modelname=None
def setmodelName(modelselected):
try:
modelname=str(modelselected)
print("setmodelName ,given modelname: \\n",modelname)
if (modelname.lower() == 'logisticregression'):
modelUsed=LogisticRegression()
return True
elif (modelname.lower() == "linearregression"):
modelUsed = LinearRegression()
return True
elif (modelname.lower() == "sgdclassifier"):
#from sklearn.linear_model import SGDClassifier
modelUsed=SGDClassifier()
return True
elif (modelname.lower() == "knn"):
modelUsed = KNeighborsClassifier()
return True
elif (modelname.lower() == "decisiontreeclassifier"):
modelUsed = DecisionTreeClassifier()
return True
else:
return False
except Exception as e:
log(INFO, "set fl model name fn issue: ",e)
def get_model_parameters(model:modelUsed) -> LogRegParams:
"""Returns the paramters of a sklearn LogisticRegression model."""
model_name=model.__class__.__name__
if model.fit_intercept:
params = (model.coef_, model.intercept_)
else:
params = (model.coef_,)
return params
def set_model_params(
model:modelUsed, params: LogRegParams
) -> modelUsed:
"""Sets the parameters of a sklean LogisticRegression model."""
model.coef_ = params[0]
model_name=model.__class__.__name__
try:
if model.fit_intercept:
model.intercept_ = params[1]
except Exception as e:
log(INFO, "set_model_params fn issue: ",e)
pass
return model
def set_initial_params_reg(model,no_vals,no_features):
"""Sets initial parameters as zeros Required since model params are
uninitialized until model.fit is called.
But server asks for initial parameters from clients at launch. Refer
to sklearn.linear_model.LogisticRegression documentation for more
information.
"""
no_vals = no_vals
n_features = no_features
# model.classes_ = np.array([i for i in range(n_classes)])
model.coef_ = np.zeros( n_features,)
model_name=model.__class__.__name__
try:
if model.fit_intercept:
# model.intercept_ = np.ones((no_vals,1))
model.intercept_ = np.zeros((no_vals,))
except Exception as e:
log(INFO, "set_initial_params fn issue: ",e)
pass
def set_initial_params(model,no_classes,no_features):
"""Sets initial parameters as zeros Required since model params are
uninitialized until model.fit is called.
But server asks for initial parameters from clients at launch. Refer
to sklearn.linear_model.LogisticRegression documentation for more
information.
"""
n_classes = no_classes
n_features = no_features
model.classes_ = np.array([i for i in range(n_classes)])
model.coef_ = np.zeros((n_classes, n_features))
model_name=model.__class__.__name__
try:
if model.fit_intercept:
model.intercept_ = np.zeros((n_classes,))
except Exception as e:
log(INFO, "set_initial_params fn issue: ",e)
pass
def shuffle(X: np.ndarray, y: np.ndarray) -> XY:
"""Shuffle X and y."""
rng = np.random.default_rng()
idx = rng.permutation(len(X))
return X[idx], y[idx]
def partition(X: np.ndarray, y: np.ndarray, num_partitions: int) -> XYList:
"""Split X and y into a number of partitions."""
return list(
zip(np.array_split(X, num_partitions), np.array_split(y, num_partitions))
)
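# --- Illustrative sketch (not part of the original module) ---
# The server calls setmodelName/set_initial_params before the first federated round so
# that get_model_parameters can return zero-valued weights of the right shape. A small
# guarded demo with hypothetical sizes (3 classes, 4 features, 12 samples):
if __name__ == '__main__':
    demo_model = LogisticRegression()
    set_initial_params(demo_model, no_classes=3, no_features=4)
    print(get_model_parameters(demo_model)[0].shape)   # -> (3, 4) zero coefficients
    X_demo = np.random.rand(12, 4)
    y_demo = np.random.randint(0, 3, size=12)
    X_demo, y_demo = shuffle(X_demo, y_demo)
    parts = partition(X_demo, y_demo, num_partitions=3)
    print(len(parts), parts[0][0].shape)               # -> 3 partitions of shape (4, 4)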
<s> # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import logging
import os
import sys
from flwr.common.logger import log
from logging import INFO
from flserver import flserver
class aionfls:
def __init__(self):
self.confdata=None
def configLoad(self,jsonfile):
import json
jsonfile=str(jsonfile)
with open(jsonfile, 'r') as file:
self.confdata = json.load(file)
return self.confdata
def dataload(self,datapath):
df = pd.read_csv(datapath) #chunk_size=50000
## Data preprocessing for the test dataset; in AION, the AION profiler handles this.
df =df[~df.isin([np.nan, np.inf, -np.inf]).any(axis=1)]
df=df.reset_index(drop=True)
return df
# Start Flower server for n rounds of federated learning
if __name__ == "__main__":
classobj=aionfls()
json_file=sys.argv[1]
confdata = classobj.configLoad(json_file)
data_location = confdata["data_location"]
# deploy_location=confdata['deploy_location']
cwd = os.path.abspath(os.path.dirname(__file__))
model_name=confdata['model_name']
version=str(confdata['version'])
file_name=model_name+'_'+version+".log"
try:
fl_log=os.path.normpath(os.path.join(cwd,'logs',file_name))
except Exception as e:
classobj.log.info("Log path error. Error Msg: \\n",e)
logging.basicConfig(filename=fl_log,format='%(asctime)s %(message)s', filemode='w',level=logging.DEBUG)
classobj.log = logging.getLogger('AION')
print("===============")
print("flserver main function")
print("===============")
if (confdata['evaluation_required'].lower() == 'false'):
df=None
aionflobj=flserver(df,confdata)
print("flserver condition is false")
else:
## User selected option is True
print("flserver condition is true")
data_location = os.path.normpath(os.path.join(cwd, data_location))
# print("current data_location",data_location)
df=classobj.dataload(data_location)
aionflobj=flserver(df,confdata)
status=aionflobj.runFLServer()
classobj.log.info("Aion FL Server run Status: \\n"+str(status))
<s><s><s><s><s><s> # -*- coding: utf-8 -*-
"""
Created on Sat Sep 10 23:57:56 2022
"""
import numpy as np
import pandas as pd
from secrets import token_bytes
from ppxgboost import PaillierAPI as paillier
from ppxgboost import BoosterParser as boostparser
from ppxgboost import PPBooster as ppbooster
from ppxgboost.PPBooster import MetaData
from ppxgboost.PPKey import PPBoostKey
# from ope.pyope.ope import OPE
from pyope.ope import OPE
import joblib
import logging
import os
from flask import Flask,request,jsonify,render_template
# import pickle
from flask import Flask, request, jsonify
import json
import jsonpickle
import os.path
import time
import subprocess
import sys
from os.path import expanduser
import ntpath
import shutil
import platform
from pathlib import Path
home = expanduser("~")
if platform.system() == 'Windows':
LOG_FILE_PATH = os.path.join(home,'AppData','Local','HCLT','AION','logs')
else:
LOG_FILE_PATH = os.path.join(home,'HCLT','AION','logs')
app = Flask(__name__)
class server_ppxgboost:
def __init__(self):
# self.problemtype=problemtype
self.confdata=None
## Loading config file
def configload(self):
cwd = os.path.abspath(os.path.dirname(__file__))
file_name='config.json'
try:
config_file=os.path.normpath(os.path.join(cwd,'config',file_name))
except Exception as e:
print("config path error. Error Msg: \\n",e)
with open(config_file, 'r') as file:
data = json.load(file)
model_name=str(data["model_name"])
# version=str(data["version"])
return model_name
## Load server xgboost model from ../model dir.
def model_load( self,path):
cwd = os.path.abspath(os.path.dirname(__file__))
file_name=path
try:
model_location=os.path.normpath(os.path.join(cwd,'model',file_name))
except Exception as e:
print("Model path error. Error Msg: \\n",e)
# print(path)
loaded_model = joblib.load(model_location)
return loaded_model
## Generate Encrypted prediction fn
def ppxgboostpredict_server(self,model,ppBoostKey,clientdata,min_max):
xgboost_model = model
meta_min_max = MetaData(min_max)
p_trees, feature_set, min_max = boostparser.model_to_trees(xgboost_model, min_max)
enc_trees = ppbooster.enc_xgboost_model(ppBoostKey, p_trees, MetaData(min_max))
enc_client_data=clientdata
# enc_predictions = ppbooster.predict_binary(enc_trees, X_test) # should rename the function
enc_predictions = ppbooster.predict_binary(p_trees, enc_client_data)
return enc_predictions
## Convert a scikit-learn XGBoost wrapper model to a native Booster model
def xgboostwrappertonative(self,wrappermodel):
nativemodel = wrappermodel.get_booster()
return nativemodel
def training_dataset_parser(self,train_data: pd.DataFrame):
"""
:param train_data: dataframe training data
:return: minimum of the training dataset, and maximum of the training dataset.
"""
return {'min': np.min(pd.DataFrame.min(train_data)), 'max': np.max(pd.DataFrame.max(train_data))}
## Homomorphic secure main server
cls_obj=server_ppxgboost()
@app.route('/homomorphicprediction_server_api',methods=['GET','POST'])
def main_server():
data = request.get_json(force=True)
response_data = json.dumps(data)
json_in= json.loads(response_data)
values = json_in['values']
features=json_in['features']
ppBoostKey=jsonpickle.decode(json_in['key'])
encrypted_clientdata=pd.DataFrame(values,columns =features)
## Create encrypted prediction from the model
model=None
min_max = {'min': 0, 'max': 1000}
model_name = cls_obj.configload()
# model_name=usecase_name
model_location=model_name
model_xgboost = cls_obj.model_load(model_location)
try:
## For sklearn based xgboost model to native model
model = cls_obj.xgboostwrappertonative(model_xgboost)
except:
## For a native xgboost model, we don't need to get the booster.
model= model_xgboost
## For logging
cwd = os.path.abspath(os.path.dirname(__file__))
# model_name=model_name
file_name = model_name.rsplit('.', 1)
file_name=file_name[0]
file_name=file_name+".log"
try:
hm_log=os.path.normpath(os.path.join(cwd,'logs',file_name))
os.makedirs(os.path.dirname(hm_log), exist_ok=True)
except Exception as e:
print("Log path error. Error Msg: \\n",e)
logging.basicConfig(filename=hm_log,format='%(asctime)s %(message)s', filemode='w',level=logging.DEBUG)
log = logging.getLogger('AION')
log.info('Server regression encryption service started...')
print("Encrypted client data is ....\\n")
log.info("Encrypted client data is (received by server): \\n"+str(encrypted_clientdata))
print("Client side encrypted data: \\n",encrypted_clientdata)
enc_predictions = cls_obj.ppxgboostpredict_server(model,ppBoostKey,encrypted_clientdata,min_max)
log.info("server side encrypted prediction: \\n"+str(enc_predictions))
## Serialize the ppxgboost encrypted prediction with jsonpickle; the standard pickle library does not work here.
enc_predictions_json = jsonpickle.encode(enc_predictions)
# enc_predictions_json = enc_predictions.to_json()
return enc_predictions_json
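# --- Illustrative sketch (not part of the original service) ---
# A client is expected to POST JSON with 'values' (encrypted rows), 'features'
# (column names) and 'key' (a jsonpickle-encoded PPBoostKey) to the route above, then
# decode the jsonpickle response. The helper below sketches such a call with the
# `requests` library; the url and payload arguments are hypothetical and the function
# is never invoked by this module.
def _example_client_call_sketch(url, enc_values, feature_names, pp_boost_key):
    import requests
    payload = {
        'values': enc_values,                      # encrypted feature rows
        'features': feature_names,                 # column names of the rows
        'key': jsonpickle.encode(pp_boost_key),    # serialized PPBoostKey
    }
    resp = requests.post(url, json=payload)
    return jsonpickle.decode(resp.text)            # encrypted predictions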
if __name__ == '__main__':
#app.run(debug=True)
app.run(host="localhost", port=9000, debug=True)<s><s> # -*- coding: utf-8 -*-
"""
Created on Sat Sep 10 23:57:56 2022
@author: jayaramakrishnans
"""
import numpy as np
import pandas as pd
from secrets import token_bytes
from ppxgboost import PaillierAPI as paillier
from ppxgboost import BoosterParser as boostparser
from ppxgboost import PPBooster as ppbooster
from ppxgboost.PPBooster import MetaData
from ppxgboost.PPKey import PPBoostKey
# from ope.pyope.ope import OPE
from pyope.ope import OPE
import joblib
import logging
import os
from flask import Flask,request,jsonify,render_template
# import pickle
from flask import Flask, request, jsonify
import json
import jsonpickle
app = Flask(__name__)
class server_ppxgboost:
def __init__(self):
# self.problemtype=problemtype
self.confdata=None
print("Inside server_ppxgboost_1\\n")
## Loading config file
def configload(self):
print("Inside server_ppxgboost_1,configload\\n")
cwd = os.path.abspath(os.path.dirname(__file__))
file_name='config.json'
try:
config_file=os.path.normpath(os.path.join(cwd,'config',file_name))
except Exception as e:
print("config path error. Error Msg: \\n",e)
with open(config_file, 'r') as file:
data = json.load(file)
model_name=str(data["model_name"])
# version=str(data["version"])
return model_name
## Load server xgboost model from ../model dir.
def model_load( self,path):
print("Inside server_ppxgboost_1,model_load\\n")
cwd = os.path.abspath(os.path.dirname(__file__))
file_name=path
try:
model_location=os.path.normpath(os.path.join(cwd,'model',file_name))
except Exception as e:
print("Model path error. Error Msg: \\n",e)
# print(path)
loaded_model = joblib.load(model_location)
return loaded_model
## Generate Encrypted prediction fn
def ppxgboostpredict_server(self,model,ppBoostKey,clientdata,min_max):
xgboost_model = model
meta_min_max = MetaData(min_max)
p_trees, feature_set, min_max = boostparser.model_to_trees(xgboost_model, min_max)
enc_trees = ppbooster.enc_xgboost_model(ppBoostKey, p_trees, MetaData(min_max))
enc_client_data=clientdata
# enc_predictions = ppbooster.predict_binary(enc_trees, X_test) # should rename the function
enc_predictions = ppbooster.predict_binary(p_trees, enc_client_data)
return enc_predictions
## XGBoost wrapper for native model (native model to scikit learn xgboost model)
def xgboostwrappertonative(self,wrappermodel):
print("Inside server_ppxgboost_7,xgboostwrappertonative= \\n",wrappermodel)
nativemodel = wrappermodel.get_booster()
return nativemodel
def training_dataset_parser(self,train_data: pd.DataFrame):
"""
:param train_data: dataframe training data
:return: minimum of the training dataset, and maximum of the training dataset.
"""
return {'min': np.min(pd.DataFrame.min(train_data)), 'max': np.max(pd.DataFrame.max(train_data))}
## Homomorphic secure main server
cls_obj=server_ppxgboost()
@app.route('/homomorphicprediction_server_api',methods=['GET','POST'])
def main_server():
data = request.get_json(force=True)
response_data = json.dumps(data)
json_in= json.loads(response_data)
values = json_in['values']
features=json_in['features']
ppBoostKey=jsonpickle.decode(json_in['key'])
encrypted_clientdata=pd.DataFrame(values,columns =features)
## Create encrypted prediction from model
model=None
min_max = {'min': 0, 'max': 1000}
model_name = cls_obj.configload()
# model_name=usecase_name
model_location=model_name
model_xgboost = cls_obj.model_load(model_location)
try:
## For sklearn based xgboost model to native model
model = cls_obj.xgboostwrappertonative(model_xgboost)
except:
## For native xgboost, we don't need to get the booster.
model= model_xgboost
## For logging
cwd = os.path.abspath(os.path.dirname(__file__))
# model_name=model_name
file_name = model_name.rsplit('.', 1)
file_name=file_name[0]
file_name=file_name+".log"
try:
hm_log=os.path.normpath(os.path.join(cwd,'logs',file_name))
os.makedirs(os.path.dirname(hm_log), exist_ok=True)
except Exception as e:
print("Log path error. Error Msg: \\n",e)
logging.basicConfig(filename=hm_log,format='%(asctime)s %(message)s', filemode='w',level=logging.DEBUG)
log = logging.getLogger('AION')
log.info('Server binary class encryption service started...')
print("Encrypted client data is ....\\n")
log.info("Encrypted client data is (received by server): \\n"+str(encrypted_clientdata))
enc_predictions = cls_obj.ppxgboostpredict_server(model,ppBoostKey,encrypted_clientdata,min_max)
log.info("server side encrypted prediction: \\n"+str(enc_predictions))
## Serialize the ppboost encrypted prediction with jsonpickle; the standard pickle lib does not work here.
enc_predictions_json = jsonpickle.encode(enc_predictions)
# enc_predictions_json = enc_predictions.to_json()
return enc_predictions_json
if __name__ == '__main__':
#app.run(debug=True)
app.run(host="localhost", port=9000, debug=True)<s> # -*- coding: utf-8 -*-
"""
Created on Sat Sep 10 23:57:56 2022
"""
import numpy as np
import sqlite3
import sys
import pandas as pd
from secrets import token_bytes
from ppxgboost import PaillierAPI as paillier
from ppxgboost import BoosterParser as boostparser
from ppxgboost import PPBooster as ppbooster
from ppxgboost.PPBooster import MetaData
from ppxgboost.PPKey import PPBoostKey
# from ope.pyope.ope import OPE
from pyope.ope import OPE
import joblib
import logging
import os
from flask import Flask,request,jsonify,render_template
# import pickle
from flask import Flask, request, jsonify
import json
import jsonpickle
import time
from pathlib import Path
app = Flask(__name__)
class server_ppxgboost:
def __init__(self):
# self.problemtype=problemtype
print("Inside server_ppxgboost_1,init\\n")
self.confdata=None
## Loading config file
def configload(self):
cwd = os.path.abspath(os.path.dirname(__file__))
file_name='secure_config.json'
try:
config_file=os.path.normpath(os.path.join(cwd,'etc',file_name))
except Exception as e:
print("config path error. Error Msg: \\n",e)
with open(config_file, 'r') as file:
data = json.load(file)
model_name=str(data["model_name"])
# version=str(data["version"])
return model_name
## Load server xgboost model from ../model dir.
def model_load( self,path):
cwd = os.path.abspath(os.path.dirname(__file__))
file_name=path
try:
model_location=os.path.normpath(os.path.join(cwd,'model',file_name))
except Exception as e:
print("Model path error. Error Msg: \\n",e)
# print(path)
loaded_model = joblib.load(model_location)
return loaded_model
def create_connection(self,db_file):
conn = None
try:
conn = sqlite3.connect(db_file)
conn.execute('''CREATE TABLE IF NOT EXISTS modelinfo (key BLOB NOT NULL,encrypttree BLOB NOT NULL)''')
except Exception as e:
print(e)
return conn
def count_encrypt_model(self,conn):
try:
sql = "select count(*) from modelinfo"
cur = conn.cursor()
cur.execute(sql)
cur_result = cur.fetchone()
return cur_result[0]
except Exception as e:
print(e)
def create_encryptmodel(self,conn,modeldetails):
sql = ''' INSERT INTO modelinfo(key,encrypttree) VALUES(?,?) '''
cur = conn.cursor()
cur.execute(sql, modeldetails)
conn.commit()
return cur.lastrowid
def search_encryptmodel(self,conn,key):
try:
sql = "SELECT encrypttree FROM modelinfo where key='"+key+"'"
cursor = conn.execute(sql)
for row in cursor:
return row[0]
return ''
except Exception as e:
print(e)
def ppxgboostpredict_server(self,model,ppBoostKey,clientdata,min_max,noofclasses):
try:
db_file = Path(__file__).parent/'logs'/'encryptedModels'
conn = self.create_connection(db_file)
enc_trees = self.search_encryptmodel(conn,jsonpickle.encode(ppBoostKey))
if enc_trees != '':
enc_trees = jsonpickle.decode(enc_trees)
else:
if self.count_encrypt_model(conn) >= 5:
outputjson = {"status":"ERROR","msg":"Maximum Number of Encrypted"}
return json.dumps(outputjson)
xgboost_model = model
meta_min_max = MetaData(min_max)
p_trees, feature_set, min_max = boostparser.model_to_trees(xgboost_model, min_max)
enc_trees = ppbooster.enc_xgboost_model(ppBoostKey, p_trees, MetaData(min_max))
modelinfo = (jsonpickle.encode(ppBoostKey),jsonpickle.encode(enc_trees))
self.create_encryptmodel(conn,modelinfo)
enc_client_data=clientdata
# try:
# num_classes = model.n_classes_
# except:
# num_classes = noofclasses
num_classes = noofclasses
if num_classes == 0:
enc_predictions = ppbooster.predict_binary(p_trees, enc_client_data)
else:
enc_predictions = ppbooster.predict_multiclass(enc_trees, num_classes, enc_client_data)
enc_predictions_json = jsonpickle.encode(enc_predictions)
outputjson = {"status":"SUCCESS","data":enc_predictions_json}
return json.dumps(outputjson)
except Exception as e:
outputjson = {"status":"ERROR","msg":str(e)}
return json.dumps(outputjson)
## XGBoost wrapper for native model (native model to scikit learn xgboost model)
def xgboostwrappertonative(self,wrappermodel):
nativemodel = wrappermodel.get_booster()
try:
noOfClasses = wrappermodel.n_classes_
except Exception as e:
print(e)
noOfClasses = 0
return nativemodel,noOfClasses
def training_dataset_parser(self,train_data: pd.DataFrame):
"""
:param train_data: dataframe training data
:return: minimum of the training dataset, and maximum of the training dataset.
"""
return {'min': np.min(pd.DataFrame.min(train_data)), 'max': np.max(pd.DataFrame.max(train_data))}
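## Hedged illustration of the SQLite model cache defined in this class: each encrypted model is
## stored as (jsonpickle-encoded PPBoostKey, jsonpickle-encoded encrypted trees) and is looked up
## again by the same encoded key. 'demo_models.db' is a placeholder path, not an AION default.
def _example_cache_roundtrip():
    srv = server_ppxgboost()
    conn = srv.create_connection('demo_models.db')
    srv.create_encryptmodel(conn, ('encoded-key-placeholder', 'encoded-trees-placeholder'))
    cached_trees = srv.search_encryptmodel(conn, 'encoded-key-placeholder')
    return cached_trees, srv.count_encrypt_model(conn)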
## Homomorphic secure main server
cls_obj=server_ppxgboost()
def spredict(data):
try:
json_in= json.loads(data)
values = json_in['values']
features=json_in['features']
ppBoostKey=jsonpickle.decode(json_in['key'])
encrypted_clientdata=pd.DataFrame(values,columns =features)
model=None
min_max = {'min': 0, 'max': 1000}
model_name = cls_obj.configload()
model_location=model_name
model_xgboost = cls_obj.model_load(model_location)
try:
model,noofclasses = cls_obj.xgboostwrappertonative(model_xgboost)
except Exception as e:
print(e)
model= model_xgboost
noofclasses = 0
cwd = os.path.abspath(os.path.dirname(__file__))
# model_name=model_name
file_name = model_name.rsplit('.', 1)
file_name=file_name[0]
file_name=file_name+".log"
try:
hm_log=os.path.normpath(os.path.join(cwd,'logs',file_name))
os.makedirs(os.path.dirname(hm_log), exist_ok=True)
except Exception as e:
print("Log path error. Error Msg: \\n",e)
logging.basicConfig(filename=hm_log,format='%(asctime)s %(message)s', filemode='w',level=logging.DEBUG)
log = logging.getLogger('AION')
log.info('Server multiclass classification encryption service started...')
log.info("Encrypted client data is (received by server): \\n"+str(encrypted_clientdata))
output = cls_obj.ppxgboostpredict_server(model,ppBoostKey,encrypted_clientdata,min_max,noofclasses)
print("predictions:",output)
print("Inside server_ppxgboost_8,output= \\n",output)
return output
except Exception as e:
outputjson = {"status":"ERROR","msg":str(e)}
output = json.dumps(outputjson)
print("predictions:",output)
return output
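## Hedged sketch of the JSON string spredict() expects as its single argument: 'values' and
## 'features' describe the encrypted client frame and 'key' is a jsonpickle-encoded PPBoostKey.
def _example_spredict_payload(encrypted_df, ppBoostKey):
    import json, jsonpickle
    return json.dumps({'values': encrypted_df.values.tolist(),
                       'features': encrypted_df.columns.values.tolist(),
                       'key': jsonpickle.encode(ppBoostKey)})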
if __name__ == "__main__":
output = spredict(sys.argv[1])<s> # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import logging
import os
import sys
from logging import INFO
from script.heRegression import client_ppxgboost
from script.aion_predict import selector
from script.inputprofiler import inputprofiler
import argparse
class aion_hemulticlient:
def __init__(self):
self.confdata=None
def dataload(self,datapath):
df = pd.read_csv(datapath)
## Data preprocessing for the test dataset; in AION, the AION profiler handles it.
# df =df[~df.isin([np.nan, np.inf, -np.inf]).any(1)]
# df=df.reset_index(drop=True)
profilerobj = inputprofiler()
df = profilerobj.run(df)
selectobj = selector()
df = selectobj.run(df)
return df
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--inputDataLocation', help='Input Data Path')
parser.add_argument('-k', '--keyGenerate', help='True')
parser.add_argument('-e', '--endPoint', help='Service End Point')
args = parser.parse_args()
if args.inputDataLocation:
dataLocation=args.inputDataLocation
if args.keyGenerate:
keyGenerate = args.keyGenerate
else:
keyGenerate='False'
print(keyGenerate)
if args.endPoint:
endPoint=args.endPoint
else:
raise ValueError('End Point Not Defined')
classobj=aion_hemulticlient()
df=classobj.dataload(dataLocation)
aionhmcobj=client_ppxgboost(df,keyGenerate,endPoint)
ppxgboost_pred=aionhmcobj.main_client()
print("final decrypted prediction at client side: \\n",ppxgboost_pred)
<s> # -*- coding: utf-8 -*-
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
from secrets import token_bytes
from ppxgboost import PaillierAPI as paillier
from ppxgboost import BoosterParser as boostparser
from ppxgboost import PPBooster as ppbooster
from ppxgboost.PPBooster import MetaData
from ppxgboost.PPKey import PPBoostKey
# from ope.pyope.ope import OPE
from pyope.ope import OPE
import sys
sys.path.insert(0, '..')
import logging
from logging import INFO
import pickle
import requests
import json
# from json import JSONEncoder
import jsonpickle
import os
from pathlib import Path
## AION main client class for ppxgboost-based encryption/decryption
class client_ppxgboost:
def __init__(self,data,keyGenerate,endPoint):
self.data=data
self.keyGenerate = keyGenerate
self.endPoint = endPoint
self.prediction=None
## For logging
clientDirectory = os.path.abspath(os.path.dirname(__file__))
# model_name=model_name
file_name = "he_regression"
file_name=file_name+".log"
self.keydir=os.path.join(clientDirectory,'..','keys')
os.makedirs(self.keydir, exist_ok=True)
try:
hm_log=os.path.normpath(os.path.join(clientDirectory,'logs',file_name))
os.makedirs(os.path.dirname(hm_log), exist_ok=True)
except Exception as e:
print("Log path error. Error Msg: \\n",e)
logging.basicConfig(filename=hm_log,format='%(asctime)s %(message)s', filemode='w',level=logging.DEBUG)
self.log = logging.getLogger('AION')
self.log.info('Client Regression homomorphic encryption service started...')
## Loading configuration parameters, Not used now.
def configload(self):
try:
data=self.confdata
usecase_name=str(data["usecase_name"])
version=str(data["version"])
problem_type=data["problem_type"]
model_location=str(data["model_location"])
data_location=str(data["data_location"])
selected_feature=data["selected_feature"]
if (type(selected_feature) is str):
selected_feature=selected_feature.split(',')
target_feature=data["target_feature"]
client_encryption_accuracy=str(data["client_encryption_accuracy"])
test_size=int(data["test_size"])
test_size=test_size/100
except Exception as e:
self.log.info("Reading server config file issue. Err.Msg: %s "+str(e))
return usecase_name,data_location,model_location,problem_type,version,selected_feature,target_feature,client_encryption_accuracy,test_size
## Load the model, Not used at client now.
def model_load(self, path):
loaded_model = pickle.load(open(path, 'rb'))
return loaded_model
#Generating secure key
def generate_ppboostkey(self):
try:
public_key_file = Path(__file__).parent.parent/'keys'/'public.k'
private_key_file = Path(__file__).parent.parent/'keys'/'private.k'
prf_key_file = Path(__file__).parent.parent/'keys'/'prf.k'
ope_key_file = Path(__file__).parent.parent/'keys'/'ope.k'
encryptor_key_file = Path(__file__).parent.parent/'keys'/'encryptor.k'
boostkey_key_file = Path(__file__).parent.parent/'keys'/'boostkey.k'
if not boostkey_key_file.exists() or self.keyGenerate == 'True':
public_key, private_key = paillier.he_key_gen()
pub_file = open(public_key_file, 'w')
pub_file.write(jsonpickle.encode(public_key))
pri_file = open(private_key_file, 'w')
pri_file.write(jsonpickle.encode(private_key))
prf_key = token_bytes(16)
OPE_key = token_bytes(16)
prf_file = open(prf_key_file, 'w')
prf_file.write(jsonpickle.encode(prf_key))
ope_file = open(ope_key_file, 'w')
ope_file.write(jsonpickle.encode(OPE_key))
encrypter = OPE(OPE_key)
enc_file = open(encryptor_key_file, 'w')
enc_file.write(jsonpickle.encode(encrypter))
ppBoostKey = PPBoostKey(public_key, prf_key, encrypter)
boost_file = open(boostkey_key_file, 'w')
boost_file.write(jsonpickle.encode(ppBoostKey))
else:
pub_file = open(public_key_file, 'r')
public_key = jsonpickle.decode(pub_file.read())
pub_file.close()
pri_file = open(private_key_file, 'r')
private_key = jsonpickle.decode(pri_file.read())
pri_file.close()
prf_file = open(prf_key_file, 'r')
prf_key = jsonpickle.decode(prf_file.read())
prf_file.close()
ope_file = open(ope_key_file, 'r')
OPE_key = jsonpickle.decode(ope_file.read())
ope_file.close()
enc_file = open(encryptor_key_file, 'r')
encrypter = jsonpickle.decode(enc_file.read())
enc_file.close()
boost_file = open(boostkey_key_file, 'r')
ppBoostKey = jsonpickle.decode(boost_file.read())
boost_file.close()
return public_key,private_key,prf_key,OPE_key,encrypter,ppBoostKey
except Exception as e:
print(e)
def ppxgboostreg_predict(self,enc_predictions,private_key):
dec = []
for p in enc_predictions:
dec.append(paillier.decrypt(private_key, p))
dec_df=pd.DataFrame(dec)
return dec
# class ppkeyEncoder(JSONEncoder):
# def default(self,o):
# return o.__dict__
## Function to connect secure server via flask restapi (send enc data and receive enc prediction.)
def connect_xgboostserver(self,ppBoostKey,encrypted_xtest):
url = self.endPoint
enc_dict={}
# df_list=[encrypted_xtest.columns.values.tolist()]+df.values.tolist()
enc_dict['values']=encrypted_xtest.values.tolist()
enc_dict['features']=encrypted_xtest.columns.values.tolist()
enc_dict['key']= jsonpickle.encode(ppBoostKey)
json_out=json.dumps(enc_dict,indent=4)
headers = {
'content-type': "application/json",
'cache-control': "no-cache"
}
response = requests.post(url,auth=('admin','aion'),data=json_out,headers=headers)
#print(response.content)
outputStr=response.content
outputStr = outputStr.decode('utf-8')
outputStr = outputStr.strip()
predict_dict = json.loads(str(outputStr))
if (predict_dict['status'] == 'SUCCESS'):
data = predict_dict['data']
enc_predictions_ob=jsonpickle.decode(data)
return enc_predictions_ob
else:
print('Error')
## Create PaillierAPI-based encryption of the user-given data; here, testdata = userdata
def generate_encrypted_testdata(self,prf_key,encrypter,testdata,min_max):
feature_set_testdata=set(testdata.columns)
ppbooster.enc_input_vector(prf_key, encrypter, feature_set_testdata, testdata, MetaData(min_max))
return testdata
## Create min and max of the testdata df for Paillier encryption/decryption
def training_dataset_parser(self, client_data: pd.DataFrame):
"""
:param client_data: dataframe training data
:return: minimum of the training dataset, and maximum of the training dataset.
"""
return {'min': np.min(pd.DataFrame.min(client_data)), 'max': np.max(pd.DataFrame.max(client_data))}
## Main client function call for enc data, send data to server, receive enc pred, finally decrypt prediction
def main_client(self):
self.log.info('Client actual data sample (displaying last 10 values) : \\n'+str(self.data.tail(10)))
print(" Client actual data sample (displaying last 10 values) : \\n",self.data.tail(10))
public_key,private_key,prf_key,OPE_key,encrypter,ppBoostKey = self.generate_ppboostkey()
min_max = self.training_dataset_parser(self.data)
meta_min_max = MetaData(min_max)
encrypted_testdata = self.generate_encrypted_testdata(prf_key,encrypter,self.data,min_max)
# print("Sending encrypted client data to server....\\n")
print("\\n Client side encrypted input data to server (displaying last 10 rows): \\n",encrypted_testdata.tail(10))
self.log.info('Client side encrypted input data to server (displaying last 10 rows): \\n'+str(encrypted_testdata.tail(10)))
enc_predictions = self.connect_xgboostserver(ppBoostKey,encrypted_testdata)
print("\\n Encrypted prediction from server (displaying last 10 values.): \\n",enc_predictions[-10:])
self.log.info('\\n Encrypted prediction from server (displaying last 10 values.): \\n'+str(enc_predictions[-10:]))
## Decrypted prediction
dec = self.ppxgboostreg_predict(enc_predictions,private_key)
# ppxgboost_pred=pd.DataFrame(list(zip(dec, predictions)),columns =['homomorphic_prediction', 'actual_prediction'])
ppxgboost_pred=pd.DataFrame(dec,columns =['homomorphic_prediction'])
# print("final decrypted prediction at client side: \\n",ppxgboost_pred)
self.log.info("Final decrypted prediction at client side:: \\n"+str(ppxgboost_pred))
return ppxgboost_pred
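## Hedged sketch: each file written under keys/ above holds a plain-text jsonpickle payload, so a
## stored key object can be restored on its own like this (the path is a placeholder):
def _example_restore_key(key_path='keys/public.k'):
    import jsonpickle
    with open(key_path, 'r') as key_file:
        return jsonpickle.decode(key_file.read())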
## For standalone testing
if __name__ == '__main__':
data=None
keyGenerate='False'
endPoint=None # must point to the running /homomorphicprediction_server_api endpoint
ppxgboost_client_obj=client_ppxgboost(data,keyGenerate,endPoint)
ppxgboost_dec_predictions = ppxgboost_client_obj.main_client()
print("In main: ppxgboost_dec_predictions: \\n",ppxgboost_dec_predictions)<s><s> # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import logging
import os
import sys
from logging import INFO
from script.heMulticlass import client_ppxgboost
from script.aion_predict import selector
from script.inputprofiler import inputprofiler
import argparse
class aion_hemulticlient:
def __init__(self):
self.confdata=None
def dataload(self,datapath):
df = pd.read_csv(datapath)
## Data preprocessing for the test dataset; in AION, the AION profiler handles it.
# df =df[~df.isin([np.nan, np.inf, -np.inf]).any(1)]
# df=df.reset_index(drop=True)
profilerobj = inputprofiler()
df = profilerobj.run(df)
selectobj = selector()
df = selectobj.run(df)
return df
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--inputDataLocation', help='Input Data Path')
parser.add_argument('-k', '--keyGenerate', help='True')
parser.add_argument('-e', '--endPoint', help='Service End Point')
args = parser.parse_args()
if args.inputDataLocation:
dataLocation=args.inputDataLocation
if args.keyGenerate:
keyGenerate = args.keyGenerate
else:
keyGenerate='False'
print(keyGenerate)
if args.endPoint:
endPoint=args.endPoint
else:
raise ValueError('End Point Not Defined')
classobj=aion_hemulticlient()
df=classobj.dataload(dataLocation)
aionhmcobj=client_ppxgboost(df,keyGenerate,endPoint)
ppxgboost_pred=aionhmcobj.main_client()
print("final decrypted prediction at client side: \\n",ppxgboost_pred)
<s> # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import logging
import os
import sys
from logging import INFO
from script.heBinary import client_ppxgboost
from script.aion_predict import selector
from script.inputprofiler import inputprofiler
## Client main class for binary classification
class aion_hebinaryclient:
def __init__(self):
self.confdata=None
def configLoad(self,jsonfile):
import json
jsonfile=str(jsonfile)
with open(jsonfile, 'r') as file:
self.confdata = json.load(file)
return self.confdata
def dataload(self,datapath):
df = pd.read_csv(datapath)
## Data preprocessing for the test dataset; in AION, the AION profiler handles it.
# df =df[~df.isin([np.nan, np.inf, -np.inf]).any(1)]
# df=df.reset_index(drop=True)
profilerobj = inputprofiler()
df = profilerobj.run(df)
selectobj = selector()
df = selectobj.run(df)
return df
# Standalone entry point for the homomorphic encryption binary classification client
if __name__ == "__main__":
classobj=aion_hebinaryclient()
data_location=str(sys.argv[1])
df=classobj.dataload(data_location)
# print("df: \\n",df)
aionhmcobj=client_ppxgboost(df)
ppxgboost_pred=aionhmcobj.main_client()
print("final decrypted prediction at client side: \\n",ppxgboost_pred)
# classobj.log.info("At client end, homomorphic prediction df: \\n"+str(ppxgboost_pred))
# classobj.log.info("Aion homomorphic client encrypted prediction df: \\n"+str(ppxgboost_pred))
<s> # -*- coding: utf-8 -*-
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
from secrets import token_bytes
from ppxgboost import PaillierAPI as paillier
from ppxgboost import BoosterParser as boostparser
from ppxgboost import PPBooster as ppbooster
from ppxgboost.PPBooster import MetaData
from ppxgboost.PPKey import PPBoostKey
# from ope.pyope.ope import OPE
from pyope.ope import OPE
import sys
sys.path.insert(0, '..')
import logging
from logging import INFO
import pickle
import requests
import json
# from json import JSONEncoder
import jsonpickle
import os
## AION main client class for ppxgboost-based encryption/decryption
class client_ppxgboost:
def __init__(self,data):
self.data=data
self.prediction=None
## For logging
cwd = os.path.abspath(os.path.dirname(__file__))
# model_name=model_name
file_name = "he_binaryclass"
file_name=file_name+".log"
try:
hm_log=os.path.normpath(os.path.join(cwd,'logs',file_name))
os.makedirs(os.path.dirname(hm_log), exist_ok=True)
except Exception as e:
print("Log path error. Error Msg: \\n",e)
logging.basicConfig(filename=hm_log,format='%(asctime)s %(message)s', filemode='w',level=logging.DEBUG)
self.log = logging.getLogger('AION')
self.log.info('Client binary class classification homomorphic encryption service started...')
## Loading configuration parameters, Not used now.
def configload(self):
try:
data=self.confdata
usecase_name=str(data["usecase_name"])
version=str(data["version"])
problem_type=data["problem_type"]
model_location=str(data["model_location"])
data_location=str(data["data_location"])
selected_feature=data["selected_feature"]
if (type(selected_feature) is str):
selected_feature=selected_feature.split(',')
target_feature=data["target_feature"]
client_encryption_accuracy=str(data["client_encryption_accuracy"])
test_size=int(data["test_size"])
test_size=test_size/100
except Exception as e:
self.log.info("Reading server config file issue. Err.Msg: %s "+str(e))
return usecase_name,data_location,model_location,problem_type,version,selected_feature,target_feature,client_encryption_accuracy,test_size
## Load the model, Not used at client now.
def model_load(self, path):
loaded_model = pickle.load(open(path, 'rb'))
return loaded_model
#Generating secure key
def generate_ppboostkey(self):
public_key, private_key = paillier.he_key_gen()
prf_key = token_bytes(16)
OPE_key = token_bytes(16)
encrypter = OPE(OPE_key)
ppBoostKey = PPBoostKey(public_key, prf_key, encrypter)
return public_key,private_key,prf_key,OPE_key,encrypter,ppBoostKey
## Binary client prediction (decrypted prediction)
def ppxgboostbinary_predict(self,enc_predictions,private_key):
dec = ppbooster.client_decrypt(private_key, enc_predictions)
##For binary classification
res = ppbooster.client_decrypt_prediction_binary(private_key, enc_predictions)
res_df=pd.DataFrame(res)
return res
# class ppkeyEncoder(JSONEncoder):
# def default(self,o):
# return o.__dict__
## Function to connect secure server via flask restapi (send enc data and receive enc prediction.)
def connect_xgboostserver(self,ppBoostKey,encrypted_xtest):
url = 'http://localhost:9000/homomorphicprediction_server_api'
enc_dict={}
# df_list=[encrypted_xtest.columns.values.tolist()]+df.values.tolist()
enc_dict['values']=encrypted_xtest.values.tolist()
enc_dict['features']=encrypted_xtest.columns.values.tolist()
enc_dict['key']= jsonpickle.encode(ppBoostKey)
json_out=json.dumps(enc_dict,indent=4)
headers = {
'content-type': "application/json",
'cache-control': "no-cache"
}
r = requests.post(url,data=json_out,headers=headers)
enc_predictions_obj=jsonpickle.decode(r.content)
return enc_predictions_obj
## Create PaillierAPI-based encryption of the user-given data; here, testdata = userdata
def generate_encrypted_testdata(self,prf_key,encrypter,testdata,min_max):
feature_set_testdata=set(testdata.columns)
ppbooster.enc_input_vector(prf_key, encrypter, feature_set_testdata, testdata, MetaData(min_max))
return testdata
## Create min and max of the testdata df for Paillier encryption/decryption
def training_dataset_parser(self, client_data: pd.DataFrame):
"""
:param client_data: dataframe training data
:return: minimum of the training dataset, and maximum of the training dataset.
"""
return {'min': np.min(pd.DataFrame.min(client_data)), 'max': np.max(pd.DataFrame.max(client_data))}
## Main client function call for enc data, send data to server, receive enc pred, finally decrypt prediction
def main_client(self):
self.log.info('Client actual data sample (displaying last 10 values) : \\n'+str(self.data.tail(10)))
print(" Client actual data sample (displaying last 10 values) : \\n",self.data.tail(10))
public_key,private_key,prf_key,OPE_key,encrypter,ppBoostKey = self.generate_ppboostkey()
min_max = self.training_dataset_parser(self.data)
meta_min_max = MetaData(min_max)
encrypted_testdata = self.generate_encrypted_testdata(prf_key,encrypter,self.data,min_max)
# print("Sending encrypted client data to server....\\n")
print("\\n Client side encrypted input data to server (displaying last 10 rows): \\n",encrypted_testdata.tail(10))
self.log.info('Client side encrypted input data to server (displaying last 10 rows): \\n'+str(encrypted_testdata.tail(10)))
enc_predictions = self.connect_xgboostserver(ppBoostKey,encrypted_testdata)
print("\\n Encrypted prediction from server (displaying last 10 values.): \\n",enc_predictions[-10:])
self.log.info('\\n Encrypted prediction from server (displaying last 10 values.): \\n'+str(enc_predictions[-10:]))
## Decrypted prediction
dec = self.ppxgboostbinary_predict(enc_predictions,private_key)
# ppxgboost_pred=pd.DataFrame(list(zip(dec, predictions)),columns =['homomorphic_prediction', 'actual_prediction'])
ppxgboost_pred=pd.DataFrame(dec,columns =['homomorphic_prediction'])
self.log.info("final decrypted prediction at client side:: \\n"+str(ppxgboost_pred))
return ppxgboost_pred
## For standalone testing
if __name__ == '__main__':
data=None
ppxgboost_client_obj=client_ppxgboost(data)
ppxgboost_dec_predictions = ppxgboost_client_obj.main_client()
# print("In main: ppxgboost_dec_predictions: \\n",ppxgboost_dec_predictions)<s> # -*- coding: utf-8 -*-
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
from secrets import token_bytes
from ppxgboost import PaillierAPI as paillier
from ppxgboost import BoosterParser as boostparser
from ppxgboost import PPBooster as ppbooster
from ppxgboost.PPBooster import MetaData
from ppxgboost.PPKey import PPBoostKey
# from ope.pyope.ope import OPE
from pyope.ope import OPE
import sys
sys.path.insert(0, '..')
import logging
from logging import INFO
import pickle
import requests
import json
# from json import JSONEncoder
import jsonpickle
import os
from pathlib import Path
## AION main client class for ppxgboost-based encryption/decryption
class client_ppxgboost:
def __init__(self,data,keyGenerate,endPoint):
self.data=data
self.keyGenerate = keyGenerate
self.endPoint = endPoint
self.prediction=None
## For logging
clientDirectory = os.path.abspath(os.path.dirname(__file__))
# model_name=model_name
file_name = "he_multiclass"
file_name=file_name+".log"
self.keydir=os.path.join(clientDirectory,'..','keys')
os.makedirs(self.keydir, exist_ok=True)
try:
hm_log=os.path.normpath(os.path.join(clientDirectory,'logs',file_name))
os.makedirs(os.path.dirname(hm_log), exist_ok=True)
except Exception as e:
print("Log path error. Error Msg: \\n",e)
logging.basicConfig(filename=hm_log,format='%(asctime)s %(message)s', filemode='w',level=logging.DEBUG)
self.log = logging.getLogger('AION')
self.log.info('Client Multi class classification homomorphic encryption service started...')
## Loading configuration parameters, Not used now.
def configload(self):
try:
data=self.confdata
usecase_name=str(data["usecase_name"])
version=str(data["version"])
problem_type=data["problem_type"]
model_location=str(data["model_location"])
data_location=str(data["data_location"])
selected_feature=data["selected_feature"]
if (type(selected_feature) is str):
selected_feature=selected_feature.split(',')
target_feature=data["target_feature"]
client_encryption_accuracy=str(data["client_encryption_accuracy"])
test_size=int(data["test_size"])
test_size=test_size/100
except Exception as e:
self.log.info("Reading server config file issue. Err.Msg: %s "+str(e))
return usecase_name,data_location,model_location,problem_type,version,selected_feature,target_feature,client_encryption_accuracy,test_size
## Load the model, Not used at client now.
def model_load(self, path):
loaded_model = pickle.load(open(path, 'rb'))
return loaded_model
#Generating secure key
def generate_ppboostkey(self):
try:
public_key_file = Path(__file__).parent.parent/'keys'/'public.k'
private_key_file = Path(__file__).parent.parent/'keys'/'private.k'
prf_key_file = Path(__file__).parent.parent/'keys'/'prf.k'
ope_key_file = Path(__file__).parent.parent/'keys'/'ope.k'
encryptor_key_file = Path(__file__).parent.parent/'keys'/'encryptor.k'
boostkey_key_file = Path(__file__).parent.parent/'keys'/'boostkey.k'
if not boostkey_key_file.exists() or self.keyGenerate == 'True':
public_key, private_key = paillier.he_key_gen()
pub_file = open(public_key_file, 'w')
pub_file.write(jsonpickle.encode(public_key))
pri_file = open(private_key_file, 'w')
pri_file.write(jsonpickle.encode(private_key))
prf_key = token_bytes(16)
OPE_key = token_bytes(16)
prf_file = open(prf_key_file, 'w')
prf_file.write(jsonpickle.encode(prf_key))
ope_file = open(ope_key_file, 'w')
ope_file.write(jsonpickle.encode(OPE_key))
encrypter = OPE(OPE_key)
enc_file = open(encryptor_key_file, 'w')
enc_file.write(jsonpickle.encode(encrypter))
ppBoostKey = PPBoostKey(public_key, prf_key, encrypter)
boost_file = open(boostkey_key_file, 'w')
boost_file.write(jsonpickle.encode(ppBoostKey))
else:
pub_file = open(public_key_file, 'r')
public_key = jsonpickle.decode(pub_file.read())
pub_file.close()
pri_file = open(private_key_file, 'r')
private_key = jsonpickle.decode(pri_file.read())
pri_file.close()
prf_file = open(prf_key_file, 'r')
prf_key = jsonpickle.decode(prf_file.read())
prf_file.close()
ope_file = open(ope_key_file, 'r')
OPE_key = jsonpickle.decode(ope_file.read())
ope_file.close()
enc_file = open(encryptor_key_file, 'r')
encrypter = jsonpickle.decode(enc_file.read())
enc_file.close()
boost_file = open(boostkey_key_file, 'r')
ppBoostKey = jsonpickle.decode(boost_file.read())
boost_file.close()
return public_key,private_key,prf_key,OPE_key,encrypter,ppBoostKey
except Exception as e:
print(e)
## PPboost multiclass prediction fn
def ppxgboostmulticlass_predict(self,enc_predictions,private_key):
##For binary classification
# res = ppbooster.client_decrypt_prediction_binary(private_key, enc_predictions)
## For multiclass classification
res = ppbooster.client_decrypt_prediction_multiclass(private_key, enc_predictions)
return res
# class ppkeyEncoder(JSONEncoder):
# def default(self,o):
# return o.__dict__
## Function to connect secure server via flask restapi (send enc data and receive enc prediction.)
def connect_xgboostserver(self,ppBoostKey,encrypted_xtest):
url = self.endPoint
enc_dict={}
# df_list=[encrypted_xtest.columns.values.tolist()]+df.values.tolist()
enc_dict['values']=encrypted_xtest.values.tolist()
enc_dict['features']=encrypted_xtest.columns.values.tolist()
enc_dict['key']= jsonpickle.encode(ppBoostKey)
json_out=json.dumps(enc_dict,indent=4)
headers = {
'content-type': "application/json",
'cache-control': "no-cache"
}
response = requests.post(url,auth=('admin','aion'),data=json_out,headers=headers)
#print(response.content)
outputStr=response.content
outputStr = outputStr.decode('utf-8')
outputStr = outputStr.strip()
predict_dict = json.loads(str(outputStr))
if (predict_dict['status'] == 'SUCCESS'):
data = predict_dict['data']
enc_predictions_ob=jsonpickle.decode(data)
return enc_predictions_ob
else:
print('Error')
## Create PaillierAPI-based encryption of the user-given data; here, testdata = userdata
def generate_encrypted_testdata(self,prf_key,encrypter,testdata,min_max):
feature_set_testdata=set(testdata.columns)
ppbooster.enc_input_vector(prf_key, encrypter, feature_set_testdata, testdata, MetaData(min_max))
return testdata
## Create min and max of the testdata df for Paillier encryption/decryption
def training_dataset_parser(self, client_data: pd.DataFrame):
"""
:param client_data: dataframe training data
:return: minimum of the training dataset, and maximum of the training dataset.
"""
return {'min': np.min(pd.DataFrame.min(client_data)), 'max': np.max(pd.DataFrame.max(client_data))}
## Main client function call for enc data, send data to server, receive enc pred, finally decrypt prediction
def main_client(self):
self.log.info('Client actual data sample (displaying last 10 values) : \\n'+str(self.data.tail(10)))
#print(" Client actual data sample (displaying last 10 values) : \\n",self.data.tail(10))
public_key,private_key,prf_key,OPE_key,encrypter,ppBoostKey = self.generate_ppboostkey()
min_max = self.training_dataset_parser(self.data)
meta_min_max = MetaData(min_max)
#print('++++++++++++++++++++++++++++')
encrypted_testdata = self.generate_encrypted_testdata(prf_key,encrypter,self.data,min_max)
# print("Sending encrypted client data to server....\\n")
#print("\\n Client side encrypted input data to server (displaying last 10 rows): \\n",encrypted_testdata.tail(10))
self.log.info('Client side encrypted input data to server (displaying last 10 rows): \\n'+str(encrypted_testdata.tail(10)))
enc_predictions = self.connect_xgboostserver(ppBoostKey,encrypted_testdata)
#print("\\n Encrypted prediction from server (displaying last 10 values.): \\n",enc_predictions[-10:])
#self.log.info('\\n Encrypted prediction from server (displaying last 10 values.): \\n'+str(enc_predictions[-10:]))
## Decrypted prediction
dec = self.ppxgboostmulticlass_predict(enc_predictions,private_key)
# ppxgboost_pred=pd.DataFrame(list(zip(dec, predictions)),columns =['homomorphic_prediction', 'actual_prediction'])
ppxgboost_pred=pd.DataFrame(dec,columns =['homomorphic_prediction'])
self.log.info("final decrypted prediction at client side:: \\n"+str(ppxgboost_pred))
return ppxgboost_pred
## For standalone testing
if __name__ == '__main__':
data=None
keyGenerate='False'
endPoint=None # must point to the running server endpoint
ppxgboost_client_obj=client_ppxgboost(data,keyGenerate,endPoint)
ppxgboost_dec_predictions = ppxgboost_client_obj.main_client()
# print("In main: ppxgboost_dec_predictions: \\n",ppxgboost_dec_predictions)<s> import os
from typing import List, Tuple
import numpy as np
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from tensorflow.keras.datasets import cifar10, fashion_mnist, imdb, mnist
from tensorflow.keras.preprocessing.sequence import pad_sequences
TrainTestData = Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]
TrainTestValData = Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]
def load_image(data_source: str) -> TrainTestData:
"""
Loads one of the following image datasets: {mnist, famnist, cifar10}.
Normalizes the data. Returns X and y for both train and test datasets.
Dtypes of X's and y's will be `float32` and `int32` to be compatible with `tf_agents`.
:param data_source: Either mnist, famnist or cifar10
:type data_source: str
:return: Tuple of (X_train, y_train, X_test, y_test) containing original split of train/test
:rtype: tuple
"""
reshape_shape = -1, 28, 28, 1
if data_source == "mnist":
(X_train, y_train), (X_test, y_test) = mnist.load_data()
elif data_source == "famnist":
(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
elif data_source == "cifar10":
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
reshape_shape = -1, 32, 32, 3
else:
raise ValueError("No valid `data_source`.")
X_train = X_train.reshape(reshape_shape).astype(np.float32) # Float32 is the expected dtype for the observation spec in the env
X_test = X_test.reshape(reshape_shape).astype(np.float32)
X_train /= 255 # /= is not available when casting int to float: https://stackoverflow.com/a/48948461/10603874
X_test /= 255
y_train = y_train.reshape(y_train.shape[0], ).astype(np.int32)
y_test = y_test.reshape(y_test.shape[0], ).astype(np.int32)
return X_train, y_train, X_test, y_test
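# Hedged usage sketch for load_image(); "mnist" triggers a Keras dataset download on first call.
def _example_load_image():
    X_train, y_train, X_test, y_test = load_image("mnist")
    # Images come back as float32 scaled to [0, 1]; labels as int32.
    return X_train.shape, X_test.shape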
def load_csv(fp_train: str, fp_test: str, label_col: str, drop_cols: List[str], normalization: bool = False) -> TrainTestData:
"""
Loads any csv-file from local filepaths. Returns X and y for both train and test datasets.
Option to normalize the data with min-max normalization.
Only csv-files with float32 values for the features and int32 values for the labels supported.
Source for dataset: https://mimic-iv.mit.edu/
:param fp_train: Location of the train csv-file
:type fp_train: str
:param fp_test: Location of the test csv-file
:type fp_test: str
:param label_col: The name of the column containing the labels of the data
:type label_col: str
:param drop_cols: List of the names of the columns to be dropped. `label_col` gets dropped automatically
:type drop_cols: List of strings
:param normalization: Normalize the data with min-max normalization?
:type normalization: bool
:return: Tuple of (X_train, y_train, X_test, y_test) containing original split of train/test
:rtype: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]
"""
if not os.path.isfile(fp_train):
raise FileNotFoundError(f"`fp_train` {fp_train} does not exist.")
if not os.path.isfile(fp_test):
raise FileNotFoundError(f"`fp_test` {fp_test} does not exist.")
if not isinstance(normalization, bool):
raise TypeError(f"`normalization` must be of type `bool`, not {type(normalization)}")
X_train = read_csv(fp_train).astype(np.float32) # DataFrames directly converted to float32
X_test = read_csv(fp_test).astype(np.float32)
y_train = X_train[label_col].astype(np.int32)
y_test = X_test[label_col].astype(np.int32)
X_train.drop(columns=drop_cols + [label_col], inplace=True) # Dropping cols and label column
X_test.drop(columns=drop_cols + [label_col], inplace=True)
# Other data sources are already normalized. RGB values are always in range 0 to 255.
if normalization:
mini, maxi = X_train.min(axis=0), X_train.max(axis=0)
X_train -= mini
X_train /= maxi - mini
X_test -= mini
X_test /= maxi - mini
return X_train.values, y_train.values, X_test.values, y_test.values # Numpy arrays
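# Hedged usage sketch for load_csv(); the file paths and column names below are placeholders.
def _example_load_csv():
    return load_csv("data/train.csv", "data/test.csv",
                    label_col="label", drop_cols=["id"], normalization=True)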
def load_imdb(config: Tuple[int, int] = (5_000, 500)) -> TrainTestData:
"""Loads the IMDB dataset. Returns X and y for both train and test datasets.
:param config: Tuple of number of most frequent words and max length of each sequence.
:type config: str
:return: Tuple of (X_train, y_train, X_test, y_test) containing original split of train/test
:rtype: tuple
"""
if not isinstance(config, (tuple, list)):
raise TypeError(f"{type(config)} is no valid datatype for `config`.")
if len(config) != 2:
raise ValueError("Tuple length of `config` must be 2.")
if not all(i > 0 for i in config):
raise ValueError("All integers of `config` must be > 0.")
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=config[0])
X_train = pad_sequences(X_train, maxlen=config[1])
X_test = pad_sequences(X_test, maxlen=config[1])
y_train = y_train.astype(np.int32)
y_test = y_test.astype(np.int32)
return X_train, y_train, X_test, y_test
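# Hedged usage sketch for load_imdb(): keep the 5,000 most frequent words and pad or truncate
# every review to 500 tokens, matching the function's default config.
def _example_load_imdb():
    X_train, y_train, X_test, y_test = load_imdb(config=(5_000, 500))
    return X_train.shape, y_train.dtype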
def get_train_test_val(X_train: np.ndarray, y_train: np.ndarray, X_test: np.ndarray, y_test: np.ndarray, min_classes: List[int],
maj_classes: List[int], imb_ratio: float = None, imb_test: bool = True, val_frac: float = 0.25,
print_stats: bool = True) -> TrainTestValData:
"""
Imbalances data and divides the data into train, test and validation sets.
The imbalance rate of each individual dataset is approx. the same as the given `imb_ratio`.
:param X_train: The X_train data
:type X_train: np.ndarray
:param y_train: The y_train data
:type y_train: np.ndarray
:param X_test: The X_test data
:type X_test: np.ndarray
:param y_test: The y_test data
:type y_test: np.ndarray
:param min_classes: List of labels of all minority classes
:type min_classes: list
:param maj_classes: List of labels of all majority classes.
:type maj_classes: list
:param imb_ratio: Imbalance ratio for minority to majority class: len(minority datapoints) / len(majority datapoints)
If the `imb_ratio` is None, data will not be imbalanced and will only be relabeled to 1's and 0's.
:type imb_ratio: float
:param imb_test: Imbalance the test dataset?
:type imb_test: bool
:param val_frac: Fraction to take from X_train and y_train for X_val and y_val
:type val_frac: float
:param print_stats: Print the imbalance ratio of the imbalanced data?
:type print_stats: bool
:return: Tuple of (X_train, y_train, X_test, y_test, X_val, y_val)
:rtype: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]
"""
if not 0 < val_frac < 1:
raise ValueError(f"{val_frac} is not in interval 0 < x < 1.")
if not isinstance(print_stats, bool):
raise TypeError(f"`print_stats` must be of type `bool`, not {type(print_stats)}.")
X_train, y_train = imbalance_data(X_train, y_train, min_classes, maj_classes, imb_ratio=imb_ratio)
# Only imbalance test-data if imb_test is True
X_test, y_test = imbalance_data(X_test, y_test, min_classes, maj_classes, imb_ratio=imb_ratio if imb_test else None)
# stratify=y_train to ensure class balance is kept between train and validation datasets
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=val_frac, stratify=y_train)
if print_stats:
p_train, p_test, p_val = [((y == 1).sum(), imbalance_ratio(y)) for y in (y_train, y_test, y_val)]
print(f"Imbalance ratio `p`:\\n"
f"\\ttrain: n={p_train[0]}, p={p_train[1]:.6f}\\n"
f"\\ttest: n={p_test[0]}, p={p_test[1]:.6f}\\n"
f"\\tvalidation: n={p_val[0]}, p={p_val[1]:.6f}")
return X_train, y_train, X_test, y_test, X_val, y_val
def imbalance_data(X: np.ndarray, y: np.ndarray, min_class: List[int], maj_class: List[int],
imb_ratio: float = None) -> Tuple[np.ndarray, np.ndarray]:
"""
Split data in minority and majority, only values in {min_class, maj_class} will be kept.
(Possibly) decrease minority rows to match the imbalance rate.
If initial imb_ratio of dataset is lower than given `imb_ratio`, the imb_ratio of the returned data will not be changed.
If the `imb_ratio` is None, data will not be imbalanced and will only be relabeled to 1's and 0's.
"""
if not isinstance(X, np.ndarray):
raise TypeError(f"`X` must be of type `np.ndarray` not {type(X)}")
if not isinstance(y, np.ndarray):
raise TypeError(f"`y` must be of type `np.ndarray` not {type(y)}")
if X.shape[0] != y.shape[0]:
raise ValueError("`X` and `y` must contain the same amount of rows.")
if not isinstance(min_class, (list, tuple)):
raise TypeError("`min_class` must be of type list or tuple.")
if not isinstance(maj_class, (list, tuple)):
raise TypeError("`maj_class` must be of type list or tuple.")
if (imb_ratio is not None) and not (0 < imb_ratio < 1):
raise ValueError(f"{imb_ratio} is not in interval 0 < imb_ratio < 1.")
if imb_ratio is None: # Do not imbalance data if no `imb_ratio` is given
imb_ratio = 1
X_min = X[np.isin(y, min_class)] # Mask the correct indexes
X_maj = X[np.isin(y, maj_class)] # Only keep data/labels for x in {min_class, maj_class} and forget all other
min_len = int(X_maj.shape[0] * imb_ratio) # Amount of rows to select from minority classes to get to correct imbalance ratio
# Keep all majority rows, decrease minority rows to match `imb_ratio`
X_min = X_min[np.random.choice(X_min.shape[0], min(min_len, X_min.shape[0]), replace=False), :]
X_imb = np.concatenate([X_maj, X_min]).astype(np.float32)
y_imb = np.concatenate((np.zeros(X_maj.shape[0]), np.ones(X_min.shape[0]))).astype(np.int32)
X_imb, y_imb = shuffle(X_imb, y_imb)
return X_imb, y_imb
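# Hedged end-to-end sketch tying load_image() to get_train_test_val(): digit 2 is treated as the
# minority class and all other digits as the majority, at a 1% imbalance ratio. print_stats is
# left off here because imbalance_ratio() is provided by a separate utils module in this project.
def _example_imbalanced_split():
    X_train, y_train, X_test, y_test = load_image("mnist")
    return get_train_test_val(X_train, y_train, X_test, y_test,
                              min_classes=[2], maj_classes=[0, 1, 3, 4, 5, 6, 7, 8, 9],
                              imb_ratio=0.01, val_frac=0.25, print_stats=False)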
<s><s> import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.metrics import (auc, average_precision_score, confusion_matrix,
f1_score, precision_recall_curve, roc_curve,precision_score,recall_score)
from tensorflow import constant
from tf_agents.trajectories import time_step
def network_predictions(network, X: np.ndarray) -> np.ndarray:
"""Computes y_pred using a given network.
Input is array of data entries.
:param network: The network to use to calculate metrics
:type network: (Q)Network
:param X: X data, input to network
:type X: np.ndarray
:return: Numpy array of predicted targets for given X
:rtype: np.ndarray
"""
if not isinstance(X, np.ndarray):
raise ValueError(f"`X` must be of type `np.ndarray` not {type(X)}")
q, _ = network(X, step_type=constant([time_step.StepType.FIRST] * X.shape[0]), training=False)
return np.argmax(q.numpy(), axis=1) # Max action for each x in X
def decision_function(network, X: np.ndarray) -> np.ndarray:
"""Computes the score for the predicted class of each x in X using a given network.
Input is array of data entries.
:param network: The network to use to calculate the score per x in X
:type network: (Q)Network
:param X: X data, input to network
:type X: np.ndarray
:return: Numpy array of scores for given X
:rtype: np.ndarray
"""
if not isinstance(X, np.ndarray):
raise ValueError(f"`X` must be of type `np.ndarray` not {type(X)}")
q, _ = network(X, step_type=constant([time_step.StepType.FIRST] * X.shape[0]), training=False)
return np.max(q.numpy(), axis=1) # Value of max action for each x in X
def classification_metrics(y_true: list, y_pred: list) -> dict:
"""Computes metrics using y_true and y_pred.
:param y_true: True labels
:type y_true: np.ndarray
:param y_pred: Predicted labels, corresponding to y_true
:type y_pred: np.ndarray
:return: Dictionary containing F1, Precision and Recall
:rtype: dict
"""
if not isinstance(y_true, (list, tuple, np.ndarray)):
raise ValueError(f"`y_true` must be of type `list` not {type(y_true)}")
if not isinstance(y_pred, (list, tuple, np.ndarray)):
raise ValueError(f"`y_pred` must be of type `list` not {type(y_pred)}")
if len(y_true) != len(y_pred):
raise ValueError("`X` and `y` must be of same length.")
#G_mean = np.sqrt(recall * specificity) # Geometric mean of recall and specificity
F1 = f1_score(y_true, y_pred, average='macro') # Default F-measure
recall = recall_score(y_true,y_pred,average='macro')
precision = precision_score(y_true,y_pred,average='macro')
return {"F1": F1, "Precision": precision, "Recall": recall}
def plot_pr_curve(network, X_test: np.ndarray, y_test: np.ndarray,
X_train: np.ndarray = None, y_train: np.ndarray = None) -> None: # pragma: no cover
"""Plots PR curve of X_test and y_test of given network.
Optionally plots PR curve of X_train and y_train.
Average precision is shown in the legend.
:param network: The network to use to calculate the PR curve
:type network: (Q)Network
:param X_test: X data, input to network
:type X_test: np.ndarray
:param y_test: True labels for `X_test`
:type y_test: np.ndarray
:param X_train: Optional X data to plot validation PR curve
:type X_train: np.ndarray
:param y_train: True labels for `X_train`
:type y_train: np.ndarray
:return: None
:rtype: NoneType
"""
plt.plot((0, 1), (1, 0), color="black", linestyle="--", label="Baseline")
# TODO: Consider changing baseline
if X_train is not None and y_train is not None:
y_val_score = decision_function(network, X_train)
val_precision, val_recall, _ = precision_recall_curve(y_train, y_val_score)
val_AP = average_precision_score(y_train, y_val_score)
plt.plot(val_recall, val_precision, label=f"Train AP: {val_AP:.3f}")
y_test_score = decision_function(network, X_test)
test_precision, test_recall, _ = precision_recall_curve(y_test, y_test_score)
test_AP = average_precision_score(y_test, y_test_score)
plt.plot(test_recall, test_precision, label=f"Test AP: {test_AP:.3f}")
plt.xlim((-0.05, 1.05))
plt.ylim((-0.05, 1.05))
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title("PR Curve")
plt.gca().set_aspect("equal", adjustable="box")
plt.legend(loc="lower left")
plt.grid(True)
plt.show()
def plot_roc_curve(network, X_test: np.ndarray, y_test: np.ndarray,
X_train: np.ndarray = None, y_train: np.ndarray = None) -> None: # pragma: no cover
"""Plots ROC curve of X_test and y_test of given network.
Optionally plots ROC curve of X_train and y_train.
AUROC is shown in the legend.
:param network: The network to use to calculate the ROC curve
:type network: (Q)Network
:param X_test: X data, input to network
:type X_test: np.ndarray
:param y_test: True labels for `X_test`
:type y_test: np.ndarray
:param X_train: Optional X data to plot validation PR curve
:type X_train: np.ndarray
:param y_train: True labels for `X_train`
:type y_train: np.ndarray
:return: None
:rtype: NoneType
"""
plt.plot((0, 1), (0, 1), color="black", linestyle="--", label="Baseline")
# TODO: Consider changing baseline
if X_train is not None and y_train is not None:
y_train_score = decision_function(network, X_train)
fpr_train, tpr_train, _ = roc_curve(y_train, y_train_score)
plt.plot(fpr_train, tpr_train, label=f"Train AUROC: {auc(fpr_train, tpr_train):.2f}")
y_test_score = decision_function(network, X_test)
fpr_test, tpr_test, _ = roc_curve(y_test, y_test_score)
plt.plot(fpr_test, tpr_test, label=f"Test AUROC: {auc(fpr_test, tpr_test):.2f}")
plt.xlim((-0.05, 1.05))
plt.ylim((-0.05, 1.05))
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve")
plt.gca().set_aspect("equal", adjustable="box")
plt.legend(loc="lower right")
plt.grid(True)
plt.show()
def plot_confusion_matrix(TP: int, FN: int, FP: int, TN: int) -> None: # pragma: no cover
"""Plots confusion matrix of given TP, FN, FP, TN.
:param TP: True Positive
:type TP: int
:param FN: False Negative
:type FN: int
:param FP: False Positive
:type FP: int
:param TN: True Negative
per_episode,memory_length=memory_length, collect_every=collect_every, n_step_update=n_step_update, model_path=model_save_path,log_dir=logFilePath)
model.compile_model(X_train,y_train,layers)
model.q_net.summary()
model.train(xval,yval)
network = model.get_network()
predictedytrain=network_predictions(network,np.array(xtrain))
predictedytest = network_predictions(network,np.array(xtest))
if "DDQN" == algorithm:
start = time.time()
modelName = "DDQN"
model = TrainDDQN(episodes, warmup_steps, learning_rate, gamma, min_epsilon, decay_episodes, target_update_period=target_update_period,target_update_tau=target_update_tau, batch_size=batch_size, collect_steps_per_episode=collect_steps_per_episode,memory_length=memory_length, collect_every=collect_every, n_step_update=n_step_update,log_dir=logFilePath)
model.compile_model(X_train,y_train,layers)
model.q_net.summary()
model.train(xval,yval)
network = model.get_network()
predictedytrain=network_predictions(network,np.array(xtrain))
predictedytest = network_predictions(network,np.array(xtest))
score = objClf.get_score(self.scoreParam,ytest,predictedytest)
score = round(score,2)
return (network,self.rl_config,score,algorithm,-1,-1,-1)
except Exception as inst:
self.log.info( '\\n-----> RL Failed!!!.'+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
<s> import os
import pickle
from datetime import datetime
import numpy as np
import tensorflow as tf
from reinforcement.environments.classifierenv import ClassifierEnv
from reinforcement.metrics import (classification_metrics, decision_function,
network_predictions, plot_pr_curve, plot_roc_curve)
from reinforcement.utils import imbalance_ratio
from tensorflow import data
from tensorflow.keras.optimizers import Adam
from tf_agents.agents.dqn.dqn_agent import DdqnAgent
from tf_agents.drivers.dynamic_step_driver import DynamicStepDriver
from tf_agents.environments.tf_py_environment import TFPyEnvironment
from tf_agents.networks.sequential import Sequential
from tf_agents.policies.random_tf_policy import RandomTFPolicy
from tf_agents.replay_buffers.tf_uniform_replay_buffer import \\
TFUniformReplayBuffer
from tf_agents.utils import common
class TrainDDQN():
"""Wrapper for DDQN training, validation, saving etc."""
def __init__(self, episodes: int, warmup_steps: int, learning_rate: float, gamma: float, min_epsilon: float, decay_episodes: int,
model_path: str = None, log_dir: str = None, batch_size: int = 64, memory_length: int = None,
collect_steps_per_episode: int = 1, val_every: int = None, target_update_period: int = 1, target_update_tau: float = 1.0,
progressbar: bool = True, n_step_update: int = 1, gradient_clipping: float = 1.0, collect_every: int = 1) -> None:
"""
Wrapper to make training easier.
Code is partly based on https://www.tensorflow.org/agents/tutorials/1_dqn_tutorial
:param episodes: Number of training episodes
:type episodes: int
:param warmup_steps: Number of episodes to fill Replay Buffer with random state-action pairs before training starts
:type warmup_steps: int
:param learning_rate: Learning Rate for the Adam Optimizer
:type learning_rate: float
:param gamma: Discount factor for the Q-values
:type gamma: float
:param min_epsilon: Lowest and final value for epsilon
:type min_epsilon: float
:param decay_episodes: Amount of episodes to decay from 1 to `min_epsilon`
:type decay_episodes: int
:param model_path: Location to save the trained model
:type model_path: str
:param log_dir: Location to save the logs, useful for TensorBoard
:type log_dir: str
:param batch_size: Number of samples in minibatch to train on each step
:type batch_size: int
:param memory_length: Maximum size of the Replay Buffer
:type memory_length: int
        :param collect_steps_per_episode: Amount of data to collect for Replay Buffer each episode
:type collect_steps_per_episode: int
:param collect_every: Step interval to collect data during training
:type collect_every: int
:param val_every: Validate the model every X episodes using the `collect_metrics()` function
:type val_every: int
:param target_update_period: Update the target Q-network every X episodes
:type target_update_period: int
:param target_update_tau: Parameter for softening the `target_update_period`
:type target_update_tau: float
:param progressbar: Enable or disable the progressbar for collecting data and training
:type progressbar: bool
:return: None
:rtype: NoneType
"""
self.episodes = episodes # Total episodes
self.warmup_steps = warmup_steps # Amount of warmup steps before training
self.batch_size = batch_size # Batch size of Replay Memory
self.collect_steps_per_episode = collect_steps_per_episode # Amount of steps to collect data each episode
self.collect_every = collect_every # Step interval to collect data during training
self.learning_rate = learning_rate # Learning Rate
self.gamma = gamma # Discount factor
self.min_epsilon = min_epsilon # Minimal chance of choosing random action
self.decay_episodes = decay_episodes # Number of episodes to decay from 1.0 to `EPSILON`
self.target_update_period = target_update_period # Period for soft updates
self.target_update_tau = target_update_tau
self.progressbar = progressbar # Enable or disable the progressbar for collecting data and training
self.n_step_update = n_step_update
self.gradient_clipping = gradient_clipping # Clip the loss
self.compiled = False
NOW = "DDQN" #datetime.now().strftime("%Y%m%d_%H%M%S")
if memory_length is not None:
self.memory_length = memory_length # Max Replay Memory length
else:
self.memory_length = warmup_steps
if val_every is not None:
self.val_every = val_every # Validate the policy every `val_every` episodes
else:
self.val_every = self.episodes // min(50, self.episodes) # Can't validate the model 50 times if self.episodes < 50
if model_path is not None:
#if os.path.exists(model_path + "/" + NOW + ".pkl"):
# os.remove(model_path + "/" + NOW + ".pkl")
self.model_path = model_path + "/" + NOW + ".pkl"
else:
self.model_path = "./models/" + NOW + ".pkl"
if log_dir is None:
log_dir = "./logs/" + NOW
self.writer = tf.summary.create_file_writer(log_dir)
def compile_model(self, X_train, y_train, layers: list = [], imb_ratio: float = None, loss_fn=common.element_wise_squared_loss) -> None:
"""Initializes the neural networks, DDQN-agent, collect policies and replay buffer.
:param X_train: Training data for the model.
:type X_train: np.ndarray
:param y_train: Labels corresponding to `X_train`. 1 for the positive class, 0 for the negative class.
        :type y_train: np.ndarray
:param layers: List of layers to feed into the TF-agents custom Sequential(!) layer.
:type layers: list
:param imb_ratio: The imbalance ratio of the data.
:type imb_ratio: float
:param loss_fn: Callable loss function
:type loss_fn: tf.compat.v1.losses
:return: None
:rtype: NoneType
"""
if imb_ratio is None:
imb_ratio = imbalance_ratio(y_train)
self.train_env = TFPyEnvironment(ClassifierEnv(X_train, y_train, imb_ratio))
self.global_episode = tf.Variable(0, name="global_episode", dtype=np.int64, trainable=False) # Global train episode counter
# Custom epsilon decay: https://github.com/tensorflow/agents/issues/339
epsilon_decay = tf.compat.v1.train.polynomial_decay(
1.0, self.global_episode, self.decay_episodes, end_learning_rate=self.min_epsilon)
self.q_net = Sequential(layers, self.train_env.observation_spec())
self.agent = DdqnAgent(self.train_env.time_step_spec(),
self.train_env.action_spec(),
q_network=self.q_net,
optimizer=Adam(learning_rate=self.learning_rate),
td_errors_loss_fn=loss_fn,
train_step_counter=self.global_episode,
target_update_period=self.target_update_period,
target_update_tau=self.target_update_tau,
gamma=self.gamma,
epsilon_greedy=epsilon_decay,
n_step_update=self.n_step_update,
gradient_clipping=self.gradient_clipping)
self.agent.initialize()
self.random_policy = RandomTFPolicy(self.train_env.time_step_spec(), self.train_env.action_spec())
self.replay_buffer = TFUniformReplayBuffer(data_spec=self.agent.collect_data_spec,
batch_size=self.train_env.batch_size,
max_length=self.memory_length)
self.warmup_driver = DynamicStepDriver(self.train_env,
self.random_policy,
observers=[self.replay_buffer.add_batch],
num_steps=self.warmup_steps) # Uses a random policy
self.collect_driver = DynamicStepDriver(self.train_env,
self.agent.collect_policy,
observers=[self.replay_buffer.add_batch],
num_steps=self.collect_steps_per_episode) # Uses the epsilon-greedy policy of the agent
        self.agent.train = common.function(self.agent.train) # Optimization
self.warmup_driver.run = common.function(self.warmup_driver.run)
self.collect_driver.run = common.function(self.collect_driver.run)
self.compiled = True
def train(self, *args) -> None:
"""Starts the training of the model. Includes warmup period, metrics collection and model saving.
:param *args: All arguments will be passed to `collect_metrics()`.
            This can be useful to pass callables, testing environments or validation data.
Overwrite the TrainDDQN.collect_metrics() function to use your own *args.
:type *args: Any
:return: None
:rtype: NoneType, last step is saving the model as a side-effect
"""
assert self.compiled, "Model must be compiled with model.compile_model(X_train, y_train, layers) before training."
# Warmup period, fill memory with random actions
if self.progressbar:
print(f"\\033[92mCollecting data for {self.warmup_steps:_} steps... This might take a few minutes...\\033[0m")
self.warmup_driver.run(time_step=None, policy_state=self.random_policy.get_initial_state(self.train_env.batch_size))
if self.progressbar:
print(f"\\033[92m{self.replay_buffer.num_frames():_} frames collected!\\033[0m")
dataset = self.replay_buffer.as_dataset(sample_batch_size=self.batch_size, num_steps=self.n_step_update + 1,
num_parallel_calls=data.experimental.AUTOTUNE).prefetch(data.experimental.AUTOTUNE)
iterator = iter(dataset)
def _train():
experiences, _ = next(iterator)
return self.agent.train(experiences).loss
        _train = common.function(_train) # Optimization
ts = None
policy_state = self.agent.collect_policy.get_initial_state(self.train_env.batch_size)
self.collect_metrics(*args) # Initial collection for step 0
for _ in range(self.episodes):
if not self.global_episode % self.collect_every:
# Collect a few steps using collect_policy and save to `replay_buffer`
if self.collect_steps_per_episode != 0:
ts, policy_state = self.collect_driver.run(time_step=ts, policy_state=policy_state)
# Sample a batch of data from `replay_buffer` and update the agent's network
train_loss = _train()
if not self.global_episode % self.val_every:
with self.writer.as_default():
tf.summary.scalar("train_loss", train_loss, step=self.global_episode)
self.collect_metrics(*args)
def collect_metrics(self, X_val: np.ndarray, y_val: np.ndarray, save_best: str = None):
"""Collects metrics using the trained Q-network.
:param X_val: Features of validation data, same shape as X_train
:type X_val: np.ndarray
:param y_val: Labels of validation data, same shape as y_train
        :type y_val: np.ndarray
:param save_best: Saving the best model of all validation runs based on given metric:
Choose one of: {Gmean, F1, Precision, Recall, TP, TN, FP, FN}
This improves stability since the model at the last episode is not guaranteed to be the best model.
:type save_best: str
"""
y_pred = network_predictions(self.agent._target_q_network, X_val)
stats = classification_metrics(y_val, y_pred)
avgQ = np.mean(decision_function(self.agent._target_q_network, X_val)) # Max action for each x in X
if save_best is not None:
if not hasattr(self, "best_score"): # If no best model yet
self.best_score = 0.0
if stats.get(save_best) >= self.best_score: # Overwrite best model
self.save_network() # Saving directly to avoid shallow copy without trained weights
self.best_score = stats.get(save_best)
with self.writer.as_default():
tf.summary.scalar("AverageQ", avgQ, step=self.global_episode) # Average Q-value for this epoch
for k, v in stats.items():
tf.summary.scalar(k, v, step=self.global_episode)
def evaluate(self,X_train,y_train, X_test, y_test):
"""
Final evaluation of trained Q-network with X_test and y_test.
Optional PR and ROC curve comparison to X_train, y_train to ensure no overfitting is taking place.
:param X_test: Features of test data, same shape as X_train
:type X_test: np.ndarray
:param y_test: Labels of test data, same shape as y_train
:type y_test: np.ndarray
:param X_train: Features of train data
:type X_train: np.ndarray
:param y_train: Labels of train data
:type y_train: np.ndarray
"""
#if hasattr(self, "best_score"):
# print(f"\\033[92mBest score: {self.best_score:6f}!\\033[0m")
# network = self.load_network(self.model_path) # Load best saved model
#else:
# network = self.agent._target_q_network # Load latest target model
#network = self.load_network(self.model_path)
#if (X_train is not None) and (y_train is not None):
# plot_pr_curve(network, X_test, y_test, X_train, y_train)
# plot_roc_curve(network, X_test, y_test, X_train, y_train)
y_pred = network_predictions(self.agent._target_q_network, X_test)
return classification_metrics(y_test, y_pred)
def get_network(self):
#network = self.load_network(self.model_path)
return self.agent._target_q_network
    def save_network(self, filename_rl=None):
        """Saves the Q-network as a pickle; uses `model_path` when no filename is given."""
        filename_rl = filename_rl if filename_rl is not None else self.model_path
        with open(filename_rl, "wb") as f:  # Save Q-network as pickle
            pickle.dump(self.agent._target_q_network, f)
@staticmethod
def load_network(fp: str):
"""Static method to load Q-network pickle from given filepath.
:param fp: Filepath to the saved pickle of the network
:type fp: str
:returns: The network-object loaded from a pickle file.
:rtype: tensorflow.keras.models.Model
"""
with open(fp, "rb") as f: # Load the Q-network
network = pickle.load(f)
return network
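# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of how this TrainDDQN wrapper could be wired up.
# The layer sizes, hyperparameters and toy data below are illustrative
# assumptions, not values taken from the original code.
if __name__ == "__main__":
    from tensorflow.keras.layers import Dense

    # Toy imbalanced binary data, purely for illustration
    X = np.random.rand(200, 4).astype(np.float32)
    y = np.array(([0] * 9 + [1]) * 20, dtype=np.int32)   # ~10% minority class
    X_val, y_val = X[:50], y[:50]

    layers = [Dense(32, activation="relu"), Dense(32, activation="relu"), Dense(2)]
    trainer = TrainDDQN(episodes=50, warmup_steps=500, learning_rate=1e-3,
                        gamma=0.99, min_epsilon=0.05, decay_episodes=25,
                        batch_size=32, memory_length=10000)
    trainer.compile_model(X, y, layers)
    trainer.train(X_val, y_val)                  # *args are forwarded to collect_metrics()
    print(trainer.evaluate(X, y, X_val, y_val))  # final classification metrics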
import os
import pickle
from datetime import datetime
import numpy as np
import tensorflow as tf
from reinforcement.environments.classifierenv import ClassifierEnv
from reinforcement.metrics import (classification_metrics, decision_function,
network_predictions, plot_pr_curve, plot_roc_curve)
from reinforcement.utils import imbalance_ratio
from tensorflow import data
from tensorflow.keras.optimizers import Adam
#from tf_agents.agents.dqn.dqn_agent import DdqnAgent
from tf_agents.agents import DqnAgent
from tf_agents.drivers.dynamic_step_driver import DynamicStepDriver
from tf_agents.environments.tf_py_environment import TFPyEnvironment
from tf_agents.networks.sequential import Sequential
from tf_agents.policies.random_tf_policy import RandomTFPolicy
from tf_agents.replay_buffers.tf_uniform_replay_buffer import \\
TFUniformReplayBuffer
from tf_agents.utils import common
class TrainDQN():
    """Wrapper for DQN training, validation, saving etc."""
def __init__(self, episodes: int, warmup_steps: int, learning_rate: float, gamma: float, min_epsilon: float, decay_episodes: int,
model_path: str = None, log_dir: str = None, batch_size: int = 64, memory_length: int = None,
collect_steps_per_episode: int = 1, val_every: int = None, target_update_period: int = 1, target_update_tau: float = 1.0,
progressbar: bool = True, n_step_update: int = 1, gradient_clipping: float = 1.0, collect_every: int = 1) -> None:
"""
Wrapper to make training easier.
        Code is partly based on https://www.tensorflow.org/agents/tutorials/1_dqn_tutorial
:param episodes: Number of training episodes
:type episodes: int
        :param warmup_steps: Number of steps to fill the Replay Buffer with random state-action pairs before training starts
:type warmup_steps: int
:param learning_rate: Learning Rate for the Adam Optimizer
:type learning_rate: float
:param gamma: Discount factor for the Q-values
:type gamma: float
:param min_epsilon: Lowest and final value for epsilon
:type min_epsilon: float
:param decay_episodes: Amount of episodes to decay from 1 to `min_epsilon`
:type decay_episodes: int
:param model_path: Location to save the trained model
:type model_path: str
        :param log_dir: Location to save the logs, useful for TensorBoard
:type log_dir: str
:param batch_size: Number of samples in minibatch to train on each step
:type batch_size: int
:param memory_length: Maximum size of the Replay Buffer
:type memory_length: int
        :param collect_steps_per_episode: Amount of data to collect for Replay Buffer each episode
:type collect_steps_per_episode: int
:param collect_every: Step interval to collect data during training
:type collect_every: int
:param val_every: Validate the model every X episodes using the `collect_metrics()` function
:type val_every: int
:param target_update_period: Update the target Q-network every X episodes
:type target_update_period: int
:param target_update_tau: Parameter for softening the `target_update_period`
:type target_update_tau: float
:param progressbar: Enable or disable the progressbar for collecting data and training
:type progressbar: bool
:return: None
:rtype: NoneType
"""
self.episodes = episodes # Total episodes
self.warmup_steps = warmup_steps # Amount of warmup steps before training
self.batch_size = batch_size # Batch size of Replay Memory
self.collect_steps_per_episode = collect_steps_per_episode # Amount of steps to collect data each episode
self.collect_every = collect_every # Step interval to collect data during training
self.learning_rate = learning_rate # Learning Rate
self.gamma = gamma # Discount factor
self.min_epsilon = min_epsilon # Minimal chance of choosing random action
self.decay_episodes = decay_episodes # Number of episodes to decay from 1.0 to `EPSILON`
self.target_update_period = target_update_period # Period for soft updates
self.target_update_tau = target_update_tau
self.progressbar = progressbar # Enable or disable the progressbar for collecting data and training
self.n_step_update = n_step_update
self.gradient_clipping = gradient_clipping # Clip the loss
self.compiled = False
NOW = "DQN" #datetime.now().strftime("%Y%m%d_%H%M%S")
if memory_length is not None:
self.memory_length = memory_length # Max Replay Memory length
else:
self.memory_length = warmup_steps
if val_every is not None:
self.val_every = val_every # Validate the policy every `val_every` episodes
else:
self.val_every = self.episodes // min(50, self.episodes) # Can't validate the model 50 times if self.episodes < 50
if model_path is not None:
#if os.path.exists(model_path + "/" + NOW + ".pkl"):
# os.remove(model_path + "/" + NOW + ".pkl")
self.model_path = model_path + "/" + NOW + ".pkl"
else:
self.model_path = "./models/" + NOW + ".pkl"
if log_dir is None:
log_dir = "./logs/" + NOW
self.writer = tf.summary.create_file_writer(log_dir)
    def compile_model(self, X_train, y_train, layers: list = [], imb_ratio: float = None, loss_fn=common.element_wise_squared_loss) -> None:
        """Initializes the neural networks, DQN-agent, collect policies and replay buffer.
:param X_train: Training data for the model.
:type X_train: np.ndarray
:param y_train: Labels corresponding to `X_train`. 1 for the positive class, 0 for the negative class.
        :type y_train: np.ndarray
:param layers: List of layers to feed into the TF-agents custom Sequential(!) layer.
:type layers: list
:param imb_ratio: The imbalance ratio of the data.
:type imb_ratio: float
:param loss_fn: Callable loss function
:type loss_fn: tf.compat.v1.losses
:return: None
:rtype: NoneType
"""
if imb_ratio is None:
imb_ratio = imbalance_ratio(y_train)
self.train_env = TFPyEnvironment(ClassifierEnv(X_train, y_train, imb_ratio))
self.global_episode = tf.Variable(0, name="global_episode", dtype=np.int64, trainable=False) # Global train episode counter
# Custom epsilon decay: https://github.com/tensorflow/agents/issues/339
epsilon_decay = tf.compat.v1.train.polynomial_decay(
1.0, self.global_episode, self.decay_episodes, end_learning_rate=self.min_epsilon)
self.q_net = Sequential(layers, self.train_env.observation_spec())
self.agent = DqnAgent(self.train_env.time_step_spec(),
self.train_env.action_spec(),
q_network=self.q_net,
optimizer=Adam(learning_rate=self.learning_rate),
td_errors_loss_fn=loss_fn,
train_step_counter=self.global_episode,
target_update_period=self.target_update_period,
target_update_tau=self.target_update_tau,
gamma=self.gamma,
epsilon_greedy=epsilon_decay,
n_step_update=self.n_step_update,
gradient_clipping=self.gradient_clipping)
self.agent.initialize()
self.random_policy = RandomTFPolicy(self.train_env.time_step_spec(), self.train_env.action_spec())
self.replay_buffer = TFUniformReplayBuffer(data_spec=self.agent.collect_data_spec,
batch_size=self.train_env.batch_size,
max_length=self.memory_length)
self.warmup_driver = DynamicStepDriver(self.train_env,
self.random_policy,
observers=[self.replay_buffer.add_batch],
num_steps=self.warmup_steps) # Uses a random policy
self.collect_driver = DynamicStepDriver(self.train_env,
self.agent.collect_policy,
observers=[self.replay_buffer.add_batch],
num_steps=self.collect_steps_per_episode) # Uses the epsilon-greedy policy of the agent
        self.agent.train = common.function(self.agent.train) # Optimization
self.warmup_driver.run = common.function(self.warmup_driver.run)
self.collect_driver.run = common.function(self.collect_driver.run)
self.compiled = True
def train(self, *args) -> None:
"""Starts the training of the model. Includes warmup period, metrics collection and model saving.
:param *args: All arguments will be passed to `collect_metrics()`.
            This can be useful to pass callables, testing environments or validation data.
Overwrite the TrainDQN.collect_metrics() function to use your own *args.
:type *args: Any
:return: None
:rtype: NoneType, last step is saving the model as a side-effect
"""
assert self.compiled, "Model must be compiled with model.compile_model(X_train, y_train, layers) before training."
# Warmup period, fill memory with random actions
if self.progressbar:
print(f"\\033[92mCollecting data for {self.warmup_steps:_} steps... This might take a few minutes...\\033[0m")
self.warmup_driver.run(time_step=None, policy_state=self.random_policy.get_initial_state(self.train_env.batch_size))
if self.progressbar:
print(f"\\033[92m{self.replay_buffer.num_frames():_} frames collected!\\033[0m")
dataset = self.replay_buffer.as_dataset(sample_batch_size=self.batch_size, num_steps=self.n_step_update + 1,
num_parallel_calls=data.experimental.AUTOTUNE).prefetch(data.experimental.AUTOTUNE)
iterator = iter(dataset)
        def _train():
experiences, _ = next(iterator)
return self.agent.train(experiences).loss
        _train = common.function(_train) # Optimization
ts = None
policy_state = self.agent.collect_policy.get_initial_state(self.train_env.batch_size)
print('Before Collect Metrics')
self.collect_metrics(*args) # Initial collection for step 0
print('After Collect Metrics')
for _ in range(self.episodes):
if not self.global_episode % self.collect_every:
# Collect a few steps using collect_policy and save to `replay_buffer`
if self.collect_steps_per_episode != 0:
ts, policy_state = self.collect_driver.run(time_step=ts, policy_state=policy_state)
# Sample a batch of data from `replay_buffer` and update the agent's network
train_loss = _train()
if not self.global_episode % self.val_every:
with self.writer.as_default():
tf.summary.scalar("train_loss", train_loss, step=self.global_episode)
self.collect_metrics(*args)
def collect_metrics(self, X_val: np.ndarray, y_val: np.ndarray, save_best: str = None):
"""Collects metrics using the trained Q-network.
:param X_val: Features of validation data, same shape as X_train
:type X_val: np.ndarray
:param y_val: Labels of validation data, same shape as y_train
        :type y_val: np.ndarray
:param save_best: Saving the best model of all validation runs based on given metric:
Choose one of: {Gmean, F1, Precision, Recall, TP, TN, FP, FN}
This improves stability since the model at the last episode is not guaranteed to be the best model.
:type save_best: str
"""
y_pred = network_predictions(self.agent._target_q_network, X_val)
print('classification_metrics')
stats = classification_metrics(y_val, y_pred)
print('Before AVGQ')
avgQ = np.mean(decision_function(self.agent._target_q_network, X_val)) # Max action for each x in X
print('After AVGQ')
if save_best is not None:
if not hasattr(self, "best_score"): # If no best model yet
self.best_score = 0.0
if stats.get(save_best) >= self.best_score: # Overwrite best model
self.save_network() # Saving directly to avoid shallow copy without trained weights
self.best_score = stats.get(save_best)
with self.writer.as_default():
tf.summary.scalar("AverageQ", avgQ, step=self.global_episode) # Average Q-value for this epoch
for k, v in stats.items():
tf.summary.scalar(k, v, step=self.global_episode)
    def evaluate(self, X_test, y_test):
        """
        Final evaluation of the trained Q-network on X_test and y_test.
        :param X_test: Features of test data, same shape as X_train
        :type X_test: np.ndarray
        :param y_test: Labels of test data, same shape as y_train
        :type y_test: np.ndarray
        """
#if hasattr(self, "best_score"):
# print(f"\\033[92mBest score: {self.best_score:6f}!\\033[0m")
# network = self.load_network(self.model_path) # Load best saved model
#else:
# network = self.agent._target_q_network # Load latest target model
#network = self.load_network(self.model_path)
#if (X_train is not None) and (y_train is not None):
# plot_pr_curve(network, X_test, y_test, X_train, y_train)
# plot_roc_curve(network, X_test, y_test, X_train, y_train)
y_pred = network_predictions(self.agent._target_q_network, X_test)
return classification_metrics(y_test, y_pred)
    def save_network(self):
        """Saves Q-network as pickle to `model_path`."""
        print('save_network')
with open(self.model_path, "wb") as f: # Save Q-network as pickle
pickle.dump(self.agent._target_q_network, f)
    def get_network(self):
        """Returns the trained target Q-network of the agent.
        :returns: The target Q-network.
        :rtype: tensorflow.keras.models.Model
        """
return self.agent._target_q_network
import numpy as np
from tf_agents.environments.py_environment import PyEnvironment
from tf_agents.specs.array_spec import ArraySpec, BoundedArraySpec
from tf_agents.trajectories import time_step as ts
class ClassifierEnv(PyEnvironment):
"""
Custom `PyEnvironment` environment for imbalanced classification.
Based on https://www.tensorflow.org/agents/tutorials/2_environments_tutorial
"""
def __init__(self, X_train: np.ndarray, y_train: np.ndarray, imb_ratio: float):
"""Initialization of environment with X_train and y_train.
:param X_train: Features shaped: [samples, ..., ]
:type X_train: np.ndarray
:param y_train: Labels shaped: [samples]
:type y_train: np.ndarray
:param imb_ratio: Imbalance ratio of the data
:type imb_ratio: float
:returns: None
:rtype: NoneType
"""
#print('1')
self._action_spec = BoundedArraySpec(shape=(), dtype=np.int32, minimum=0, maximum=(len(np.unique(y_train)) - 1), name="action")
#print(y_train)
self._observation_spec = ArraySpec(shape=X_train.shape[1:], dtype=X_train.dtype, name="observation")
#print('3')
self._episode_ended = False
self.X_train = X_train
self.y_train = y_train
self.imb_ratio = imb_ratio # Imbalance ratio: 0 < imb_ratio < 1
self.id = np.arange(self.X_train.shape[0]) # List of IDs to connect X and y data
self.episode_step = 0 # Episode step, resets every episode
self._state = self.X_train[self.id[self.episode_step]]
def action_spec(self):
"""
        Definition of the discrete action space.
1 for the positive/minority class, 0 for the negative/majority class.
"""
return self._action_spec
    def observation_spec(self):
        """Definition of the continuous state space, e.g. the observations in typical RL environments."""
return self._observation_spec
def _reset(self):
"""Shuffles data and returns the first state of the shuffled data to begin training on new episode."""
np.random.shuffle(self.id) # Shuffle the X and y data
self.episode_step = 0 # Reset episode step counter at the end of every episode
self._state = self.X_train[self.id[self.episode_step]]
self._episode_ended = False # Reset terminal condition
return ts.restart(self._state)
def _step(self, action: int):
"""
Take one step in the environment.
If the action is correct, the environment will either return 1 or `imb_ratio` depending on the current class.
If the action is incorrect, the environment will either return -1 or -`imb_ratio` depending on the current class.
"""
if self._episode_ended:
# The last action ended the episode. Ignore the current action and start a new episode
return self.reset()
env_action = self.y_train[self.id[self.episode_step]] # The label of the current state
self.episode_step += 1
if action == env_action: # Correct action
if env_action: # Minority
reward = 1 # True Positive
else: # Majority
reward = self.imb_ratio # True Negative
else: # Incorrect action
if env_action: # Minority
reward = -1 # False Negative
self._episode_ended = True # Stop episode when minority class is misclassified
else: # Majority
reward = -self.imb_ratio # False Positive
if self.episode_step == self.X_train.shape[0] - 1: # If last step in data
self._episode_ended = True
self._state = self.X_train[self.id[self.episode_step]] # Update state with new datapoint
if self._episode_ended:
return ts.termination(self._state, reward)
else:
return ts.transition(self._state, reward)
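# --- Usage sketch (illustrative, not part of the original module) ---
# A small, hedged example of the reward scheme implemented in `_step` above:
# always predicting the majority class (action 0) earns +imb_ratio per correct
# guess and -1 (plus early episode termination) on a missed minority sample.
# The toy data and the imb_ratio value are assumptions made for illustration.
if __name__ == "__main__":
    X = np.array([[0.1], [0.2], [0.8], [0.9]], dtype=np.float32)
    y = np.array([0, 0, 0, 1], dtype=np.int32)   # class 1 is the minority
    env = ClassifierEnv(X, y, imb_ratio=0.25)
    step = env.reset()
    while not step.is_last():
        step = env.step(0)                       # always predict the majority class
        print(float(step.reward))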
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
import numpy as np
import logging
logging.getLogger('tensorflow').disabled = True
import aif360
from aif360.datasets import StandardDataset
from aif360.algorithms.preprocessing.reweighing import Reweighing
from aif360.algorithms.preprocessing import DisparateImpactRemover
class DebiasingManager:
def __init__(self):
self.data = ''
# ------------------------------- Debiasing Changes -------------------------------
def get_attributes(self, data, selected_attr=None):
unprivileged_groups = []
privileged_groups = []
        if selected_attr is None:
selected_attr = data.protected_attribute_names
for attr in selected_attr:
idx = data.protected_attribute_names.index(attr)
privileged_groups.append({attr:data.privileged_protected_attributes[idx]})
unprivileged_groups.append({attr:data.unprivileged_protected_attributes[idx]})
return privileged_groups, unprivileged_groups
# ------------------------------- -------------------------------
def Bias_Mitigate(self, dataFrame, protected_feature, privileged_className, target_feature, algorithm):
# log = logging.getLogger('eion')
# log.propagate = False
data_encoded = dataFrame.copy()
categorical_names = {}
encoders = {}
dataFrame = dataFrame.replace('Unknown', 'NA')
dataFrame = dataFrame.replace(np.nan, 'NA')
try:
# Label-Encoding
for feature in dataFrame.columns:
le = LabelEncoder()
le.fit(data_encoded[feature])
data_encoded[feature] = le.transform(data_encoded[feature])
categorical_names[feature] = le.classes_
encoders[feature] = le
privileged_class = np.where(categorical_names[protected_feature] == privileged_className)[0]
target_feature_count = len(data_encoded[target_feature].value_counts())
# Check if it's BinaryLabel
if target_feature_count == 2:
binaryLabelDataset = aif360.datasets.BinaryLabelDataset(
favorable_label='1',
unfavorable_label='0',
df=data_encoded,
label_names=[target_feature],
protected_attribute_names=[protected_feature])
data_orig = binaryLabelDataset
# Check if it's Non-BinaryLabel
if target_feature_count > 2:
data_orig = StandardDataset(data_encoded,
label_name=target_feature,
favorable_classes=[1],
protected_attribute_names=[protected_feature],
privileged_classes=[privileged_class])
if algorithm == 'DIR':
DIR = DisparateImpactRemover(repair_level=0.9)
data_transf_train = DIR.fit_transform(data_orig)
# log.info('Status:-|... DIR applied on input dataset')
else:
privileged_groups, unprivileged_groups = self.get_attributes(data_orig, selected_attr=[protected_feature])
RW = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)
data_transf_train = RW.fit_transform(data_orig)
# log.info('Status:-|... Reweighing applied on input dataset')
transf_dataFrame = data_transf_train.convert_to_dataframe()[0]
data_decoded = transf_dataFrame.copy().astype('int')
for column in data_decoded.columns:
data_decoded[column] = encoders[column].inverse_transform(data_decoded[column])
debiased_dataFrame = data_decoded
except Exception as e:
print(e)
debiased_dataFrame = dataFrame
return debiased_dataFrame
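# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal, hedged example of Bias_Mitigate on a toy dataframe. The column
# names and values are assumptions made up for the illustration; note that the
# method is wrapped in try/except and falls back to returning the input
# dataframe unchanged if the aif360 pipeline fails.
if __name__ == "__main__":
    import pandas as pd
    toy = pd.DataFrame({
        'gender': ['male', 'female', 'male', 'female'] * 10,
        'approved': ['1', '0', '1', '0'] * 10,
    })
    mgr = DebiasingManager()
    # 'DIR' selects DisparateImpactRemover; any other value selects Reweighing.
    debiased = mgr.Bias_Mitigate(toy, protected_feature='gender',
                                 privileged_className='male',
                                 target_feature='approved', algorithm='RW')
    print(debiased.head())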
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'appfe.ux.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
"""mpgWebApp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
    https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include, re_path
from appfe.api import inferenceApis
from django.urls import path, re_path
urlpatterns = [
path('predict', inferenceApis.apipredict,name='PredictAPI'),
path('spredict',inferenceApis.apispredict,name='SecurePredictAPI'),
path('monitoring', inferenceApis.apiinputdrift,name='MonitoringAPI'),
path('performance', inferenceApis.apioutputdrift,name='Performance'),
path('xplain', inferenceApis.apixplain,name='Xplain'),
path('features',inferenceApis.apifeatures,name='Features'),
path('uploadfile',inferenceApis.uploadfile,name='uploadfile'),
path('retrain',inferenceApis.retrain,name='retrain'),
path('trainstatus',inferenceApis.trainstatus,name='trainstatus'),
path('publish',inferenceApis.publish,name='publish'),
path('geteda',inferenceApis.geteda,name='geteda'),
path('pattern_anomaly_settings',inferenceApis.apiPatternAnomalySettings,name='PatternAnomalySettings'),
path('pattern_anomaly_predict',inferenceApis.apiPatternAnomalyPredict,name='PatternAnomalyPredict')
]
from django.shortcuts import render
from django.http import HttpResponse
from appbe.dataPath import DEPLOY_LOCATION
from rest_framework import status
from django.db.models import Max, F
import os,sys
import time
import json
import re
import pandas as pd
from rest_framework.permissions import IsAuthenticated
from django.views.decorators.csrf import csrf_exempt
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appfe.modelTraining.models import usecasedetails
from appfe.modelTraining.models import Existusecases
import subprocess
from pathlib import Path
user_records = {}
@csrf_exempt
def geteda(request):
if request.method == 'POST':
if request.content_type == 'application/json':
output = {}
try:
data=request.body.decode('utf-8')
data = json.loads(data)
file_id = data['fileid']
edaOptions = 'All'
if 'options' in data:
edaOptions = data['options']
dataFile = os.path.join(DATA_FILE_PATH,file_id)
from appbe.eda import ux_eda
eda_obj = ux_eda(dataFile)
if 'dataoverview' in edaOptions.lower() or 'all' in edaOptions.lower():
dataDistributionDF = eda_obj.dataDistribution()
dataDistributionJson = dataDistributionDF.to_json(orient = 'records')
output['DataOverview'] = json.loads(dataDistributionJson)
if 'top10records' in edaOptions.lower() or 'all' in edaOptions.lower():
top10df = eda_obj.getTopRows(10)
top10dfJson = top10df.to_json(orient = 'records')
output['Top10Records'] = json.loads(top10dfJson)
if 'datadistribution' in edaOptions.lower() or 'all' in edaOptions.lower():
distributionJson = eda_obj.getDistribution()
output['DataDistribution'] = distributionJson
if "featureimportance" in edaOptions.lower() or 'all' in edaOptions.lower():
pca_map = eda_obj.getPCATop10Features()
pca_details = pca_map
pca_df=pd.DataFrame()
if len(pca_details) > 0:
pca_df = pd.DataFrame({'Feature':pca_details.index, 'Explained Variance Ratio':pca_details.values}).round(2)
pca_json = pca_df.to_json(orient="records")
output['FeatureImportance'] = json.loads(pca_json)
else:
pca_json = 'Error During feature importance processing'
output['FeatureImportance'] = pca_json
if "correlationanalysis" in edaOptions.lower() or 'all' in edaOptions.lower():
corr_mat = eda_obj.getCorrelationMatrix()
if not corr_mat.empty:
corr_mat = corr_mat.to_json(orient="columns")
output['CorrelationAnalysis'] = json.loads(corr_mat)
else:
output['CorrelationAnalysis'] = 'Error during correlation analysis'
if "unsupervisedclustering" in edaOptions.lower() or 'all' in edaOptions.lower():
clusteringDetails,hopkins_val = eda_obj.getClusterDetails()
output['UnsupervisedClustering'] = clusteringDetails
output['HopkinsValue'] = hopkins_val
except Exception as e:
print(e)
return HttpResponse(json.dumps({"status":"Success","output":output}),content_type="application/json")
else:
return HttpResponse(json.dumps({"status":"error","msg":"Wrong Content Type"}),content_type="application/json")
@csrf_exempt
def publish(request):
usecaseid = request.GET["usecaseid"]
currentVersion = request.GET["version"]
if request.method == 'POST':
if request.content_type == 'application/json':
try:
from appbe.models import publishmodel
status,msg,url = publishmodel(request,usecaseid,currentVersion,Existusecases,usecasedetails)
return HttpResponse(json.dumps({"status":status,"msg":msg,"url":url}),content_type="application/json")
except Exception as e:
print(e)
return HttpResponse(json.dumps({"status":"error","msg":"model training exception"}),content_type="application/json")
else:
return HttpResponse(json.dumps({"status":"error","msg":"Wrong Content Type"}),content_type="application/json")
else:
        msg = help_text(request,usecaseid,currentVersion)
return HttpResponse(msg,content_type="text/plain")
@csrf_exempt
def trainstatus(request):
usecaseid = request.GET["usecaseid"]
currentVersion = request.GET["version"]
if request.method == 'POST':
if request.content_type == 'application/json':
try:
data=request.body.decode('utf-8')
data = json.loads(data)
trainingid = int(data['trainingid'])
model = Existusecases.objects.get(id=trainingid)
if model.Status.lower() == 'success':
return HttpResponse(json.dumps({"status":"success","trainingStatus":"Trained","usecaseid":str(usecaseid),"version":str(model.Version)}),content_type="application/json")
else:
from appbe.training import checkversionrunningstatus
status = checkversionrunningstatus(trainingid,usecasedetails,Existusecases)
if status.lower() == 'success':
return HttpResponse(json.dumps({"status":"success","trainingStatus":"Trained","usecaseid":str(usecaseid),"version":str(model.Version)}),content_type="application/json")
else:
return HttpResponse(json.dumps({"status":"success","trainingStatus":status,"usecaseid":str(usecaseid),"version":str(model.Version)}),content_type="application/json")
except Exception as e:
print(e)
return HttpResponse(json.dumps({"status":"error","msg":"model training exception"}),content_type="application/json")
else:
return HttpResponse(json.dumps({"status":"error","msg":"Wrong Content Type"}),content_type="application/json")
else:
        msg = help_text(request,usecaseid,currentVersion)
return HttpResponse(msg,content_type="text/plain")
@csrf_exempt
def retrain(request):
usecaseid = request.GET["usecaseid"]
currentVersion = request.GET["version"]
if request.method == 'POST':
if request.content_type == 'application/json':
try:
data=request.body.decode('utf-8')
data = json.loads(data)
file_id = data['fileid']
p = usecasedetails.objects.get(usecaseid=usecaseid)
s1 = Existusecases.objects.filter(ModelName=p).annotate(maxver=Max('ModelName__existusecases__Version'))
config_list = s1.filter(Version=F('maxver'))
if config_list.count() > 0:
Version = config_list[0].Version
Version = Version + 1
model = Existusecases.objects.filter(ModelName=p,Version=currentVersion)
indexVal = 0
configfile = str(model[indexVal].ConfigPath)
f = open(configfile, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
configSettingsJson['basic']['modelVersion'] = str(Version)
dataFile = configSettingsJson['basic']['dataLocation']
if os.path.isfile(dataFile):
data = pd.read_csv(dataFile,encoding='utf-8',skipinitialspace = True,na_values=['-','?'],encoding_errors= 'replace')
dataFile = os.path.join(DATA_FILE_PATH,file_id)
data2 = pd.read_csv(dataFile,encoding='utf-8',skipinitialspace = True,na_values=['-','?'],encoding_errors= 'replace')
                        data = pd.concat([data, data2], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
data.to_csv(dataFile,index=False)
dataFile = os.path.join(DATA_FILE_PATH,file_id)
configSettingsJson['basic']['dataLocation'] = str(dataFile)
updatedConfigSettings = json.dumps(configSettingsJson)
filetimestamp = str(int(time.time()))
outputfile = os.path.join(CONFIG_FILE_PATH, 'AION_OUTPUT_' + filetimestamp + '.json')
config_json_filename = os.path.join(CONFIG_FILE_PATH, 'AION_' + filetimestamp + '.json')
with open(config_json_filename, "w") as fpWrite:
fpWrite.write(updatedConfigSettings)
fpWrite.close()
ps = Existusecases(DataFilePath=str(dataFile), DeployPath='', Status='Not Trained',ConfigPath=str(config_json_filename), Version=Version, ModelName=p,TrainOuputLocation=str(outputfile))
ps.save()
scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py'))
outputStr = subprocess.Popen([sys.executable, scriptPath,'-m','training','-c',config_json_filename])
ps.Status = 'Running'
ps.trainingPID = outputStr.pid
ps.save()
return HttpResponse(json.dumps({"status":"success","trainingid":str(ps.id),"version":str(ps.Version),"usecaseid":usecaseid}),content_type="application/json")
'''
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'aion_learner_status:(.*)', str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
resultJsonObj = json.loads(outputStr)
ps.Status = resultJsonObj['status']
if resultJsonObj['status'] == 'SUCCESS':
ps.modelType = resultJsonObj['data']['ModelType']
ps.DeployPath = str(resultJsonObj['data']['deployLocation'])
if resultJsonObj['data']['ModelType'] in ['clustering','anomalydetection', 'timeSeriesAnomalyDetection']: #task 11997
ps.ProblemType = 'unsupervised'
else:
ps.ProblemType = 'supervised'
ps.save()
'''
else:
return HttpResponse(json.dumps({"status":"error","msg":'Existing trained model not found'}),content_type="application/json")
except Exception as e:
print(e)
return HttpResponse(json.dumps({"status":"error","msg":"model training exception"}),content_type="application/json")
else:
return HttpResponse(json.dumps({"status":"error","msg":"Wrong Content Type"}),content_type="application/json")
else:
        msg = help_text(request,usecaseid,currentVersion)
return HttpResponse(msg,content_type="text/plain")
@csrf_exempt
def uploadfile(request):
try:
if 'file' not in request.FILES:
msg = 'No file part in the request'
return HttpResponse(json.dumps({"status":"error","msg":msg}),content_type="application/json")
else:
file = request.FILES['file']
            if file.size > 31457280:
                msg = 'Upload limit is 30 MB only'
                return HttpResponse(json.dumps({"status":"error","msg":msg}),content_type="application/json")
ext = str(file).split('.')[-1]
if ext.lower() in ['csv','tsv','tar','zip','avro','parquet']:
filetimestamp = str(int(time.time()))
file_id = 'AION_' + filetimestamp+'.'+ext
dataFile = os.path.join(DATA_FILE_PATH,file_id)
with open(dataFile, 'wb+') as destination:
for chunk in file.chunks():
destination.write(chunk)
destination.close()
return HttpResponse(json.dumps({"status":"success","fileid":file_id}),content_type="application/json")
else:
                return HttpResponse(json.dumps({"status":"error","msg":"File extension not supported"}),content_type="application/json")
except Exception as e:
print(e)
return HttpResponse(json.dumps({"status":"error","msg":"File upload exception"}),content_type="application/json")
def help_text(request,usecaseid,version):
hosturl =request.get_host()
url='http://'+hosturl+'/api/'
msg = """
Request_Type: Post
    Content_Type: application/json
    For Prediction URL: {url}predict?usecaseid={usecaseid}&version={version}
For Explanations URL: {url}xplain?usecaseid={usecaseid}&version={version}
For Input Drift URL: {url}monitoring?usecaseid={usecaseid}&version={version}
For Output Drift URL: {url}performance?usecaseid={usecaseid}&version={version}
BODY: Data in json format
""".format(url=url,usecaseid=usecaseid,version=version)
return msg
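# --- Client-side usage sketch (illustrative, not part of this module) ---
# A hedged example of calling the prediction endpoint documented in help_text()
# above. The host, usecase id, version and payload fields are assumptions made
# for the illustration; the real feature names depend on the trained use case.
if __name__ == "__main__":
    import requests  # assumption: the `requests` package is available on the client
    url = "http://localhost:8000/api/predict?usecaseid=AI0001&version=1"
    payload = [{"feature1": 10, "feature2": "A"}]  # data in json format
    resp = requests.post(url, json=payload)
    print(resp.status_code, resp.text)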
@csrf_exempt
def apispredict(request):
usecaseid = request.GET["usecaseid"]
version = request.GET["version"]
if request.method == 'POST':
if request.content_type == 'application/json':
model_path = os.path.join(DEPLOY_LOCATION,usecaseid,str(version))
isdir = os.path.isdir(model_path)
if isdir:
try:
data=request.body.decode('utf-8')
predict_path = os.path.join(model_path,'aion_spredict.py')
outputStr = subprocess.check_output([sys.executable,predict_path,data])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1)
resp = outputStr.strip()
return HttpResponse(resp,content_type="application/json")
except Exception as e:
print(e)
return HttpResponse(json.dumps({"status":"error","msg":"Bad Request"}),content_type="application/json")
else:
return HttpResponse(json.dumps({"status":"error","msg":"Wrong UseCaseID or Version"}),content_type="application/json")
else:
return HttpResponse(json.dumps({"status":"error","msg":"Wrong Content Type"}),content_type="application/json")
else:
msg = help_text(request,usecaseid,version)
return HttpResponse(msg,content_type="text/plain")
@csrf_exempt
def apipredict(request):
usecaseid = request.GET["usecaseid"]
version = request.GET["version"]
#print(request.content_type)
if request.method == 'POST':
if request.content_type in ['application/json','multipart/form-data']:
model_path = os.path.join(DEPLOY_LOCATION,usecaseid,str(version))
isdir = os.path.isdir(model_path)
if isdir:
try:
data = ''
msg = 'Bad request'
if 'file' not in request.FILES:
data=request.body.decode('utf-8')
else:
file = request.FILES['file']
if file.size > 31457280:
msg = 'Upload limit is 30 MB only'
else:
ext = str(file).split('.')[-1]
if ext.lower() in ['csv','tsv','tar','zip','avro','parquet']:
urlData = file.read()
import io
rawData = pd.read_csv(io.StringIO(urlData.decode('utf-8')))
data = rawData.to_json(orient='records')
else:
msg = 'Extension not supported'
if data != '':
predict_path = os.path.join(model_path,'aion_predict.py')
outputStr = subprocess.check_output([sys.executable,predict_path,data])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1)
resp = outputStr.strip()
return HttpResponse(resp,content_type="application/json")
else:
return HttpResponse(json.dumps({"status":"error","msg":msg}),content_type="application/json")
except Exception as e:
print(e)
return HttpResponse(json.dumps({"status":"error","msg":"Bad Request"}),content_type="application/json")
else:
return HttpResponse(json.dumps({"status":"error","msg":"Wrong UseCaseID or Version"}),content_type="application/json")
else:
return HttpResponse(json.dumps({"status":"error","msg":"Wrong Content Type"}),content_type="application/json")
else:
msg = help_text(request,usecaseid,version)
return HttpResponse(msg,content_type="text/plain")
@csrf_exempt
def apiinputdrift(request):
usecaseid = request.GET["usecaseid"]
version = request.GET["version"]
if request.method == 'POST':
if request.content_type == 'application/json':
model_path = os.path.join(DEPLOY_LOCATION,usecaseid,str(version))
isdir = os.path.isdir(model_path)
if isdir:
try:
data=request.body.decode('utf-8')
predict_path = os.path.join(model_path,'aion_ipdrift.py')
outputStr = subprocess.check_output([sys.executable,predict_path,data])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'drift:(.*)',str(outputStr), re.IGNORECASE).group(1)
resp = outputStr.strip()
return HttpResponse(resp,content_type="application/json")
except Exception as e:
print(e)
return HttpResponse(json.dumps({"status":"error","msg":"Bad Request"}),content_type="application/json")
else:
return HttpResponse(json.dumps({"status":"error","msg":"Wrong UseCaseID or Version"}),content_type="application/json")
else:
return HttpResponse(json.dumps({"status":"error","msg":"Wrong Content Type"}),content_type="application/json")
else:
msg = help_text(request,usecaseid,version)
return HttpResponse(msg,content_type="text/plain")
@csrf_exempt
def apioutputdrift(request):
usecaseid = request.GET["usecaseid"]
version = request.GET["version"]
if request.method == 'POST':
if request.content_type == 'application/json':
model_path = os.path.join(DEPLOY_LOCATION,usecaseid,str(version))
isdir = os.path.isdir(model_path)
if isdir:
try:
data=request.body.decode('utf-8')
predict_path = os.path.join(model_path,'aion_opdrift.py')
outputStr = subprocess.check_output([sys.executable,predict_path,data])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'drift:(.*)',str(outputStr), re.IGNORECASE).group(1)
resp = outputStr.strip()
return HttpResponse(resp,content_type="application/json")
except Exception as e:
print(e)
return HttpResponse(json.dumps({"status":"error","msg":"Bad Request"}),content_type="application/json")
else:
return HttpResponse(json.dumps({"status":"error","msg":"Wrong UseCaseID or Version"}),content_type="application/json")
else:
return HttpResponse(json.dumps({"status":"error","msg":"Wrong Content Type"}),content_type="application/json")
else:
msg = help_text(request,usecaseid,version)
return HttpResponse(msg,content_type="text/plain")
@csrf_exempt
def apixplain(request):
usecaseid = request.GET["usecaseid"]
version = request.GET["version"]
if request.method == 'POST':
if request.content_type == 'application/json':
model_path = (Path(DEPLOY_LOCATION)/usecaseid)/str(version)
if model_path.is_dir():
try:
with open( (model_path/'etc')/'display.json', 'r') as f:
disp_data = json.load(f)
is_explainable = not disp_data.get('textFeatures')
except:
is_explainable = True
try:
if not is_explainable:
return HttpResponse(json.dumps({"status":"error","msg":"explain api is not supported when text features are used for training"}),content_type="application/json")
data=request.body.decode('utf-8')
predict_path = model_path/'aion_xai.py'
outputStr = subprocess.check_output([sys.executable,predict_path,'local',data]) #BugId:13304
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'aion_ai_explanation:(.*)',str(outputStr), re.IGNORECASE).group(1)
resp = outputStr.strip()
return HttpResponse(resp,content_type="application/json")
except Exception as e:
print(e)
return HttpResponse(json.dumps({"status":"error","msg":"Bad Request"}),content_type="application/json")
else:
return HttpResponse(json.dumps({"status":"error","msg":"Wrong UseCaseID or Version"}),content_type="application/json")
else:
return HttpResponse(json.dumps({"status":"error","msg":"Wrong Content Type"}),content_type="application/json")
else:
msg = help_text(request,usecaseid,version)
return HttpResponse(msg,content_type="text/plain")
#@api_view(['POST','GET'])
def apifeatures(request):
usecaseid = request.GET["usecaseid"]
version = request.GET["version"]
if request.content_type == 'application/json':
model_path = os.path.join(DEPLOY_LOCATION,usecaseid,str(version))
isdir = os.path.isdir(model_path)
if isdir:
try:
data=request.body.decode('utf-8')
predict_path = os.path.join(model_path,'featureslist.py')
outputStr = subprocess.check_output([sys.executable,predict_path,data])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1)
resp = outputStr.strip()
return HttpResponse(resp,content_type="application/json")
except Exception as e:
print(e)
return HttpResponse(json.dumps({"status":"error","msg":"Bad Request"}),content_type="application/json")
else:
return HttpResponse(json.dumps({"status":"error","msg":"Wrong UseCaseID or Version"}),content_type="application/json")
else:
return HttpResponse(json.dumps({"status":"error","msg":"Wrong Content Type"}),content_type="application/json")
@csrf_exempt
def apiPatternAnomalySettings(request):
usecaseid = request.GET["usecaseid"]
version = request.GET["version"]
if request.content_type == 'application/json':
model_path = os.path.join(DEPLOY_LOCATION,usecaseid,str(version))
isdir = os.path.isdir(model_path)
if isdir:
try:
data=request.body.decode('utf-8')
data = json.loads(data)
groupswitching = data['groupswitching']
transitionprobability = data['transitionprobability']
transitionsequence = data['transitionsequence']
sequencethreshold = data['sequencethreshold']
filename = os.path.join(model_path,'clickstream.json')
print(filename)
data = {}
data['groupswitching'] = groupswitching
data['transitionprobability'] = transitionprobability
data['transitionsequence'] = transitionsequence
data['sequencethreshold'] = sequencethreshold
updatedConfig = json.dumps(data)
with open(filename, "w") as fpWrite:
fpWrite.write(updatedConfig)
fpWrite.close()
return HttpResponse(json.dumps({"status":'Success'}),content_type="application/json")
except Exception as e:
print(e)
return HttpResponse(json.dumps({"status":"error","msg":"Bad Request"}),content_type="application/json")
else:
return HttpResponse(json.dumps({"status":"error","msg":"Bad Request"}),content_type="application/json")
else:
msg = help_text(request,usecaseid,version)
return HttpResponse(msg,content_type="text/plain")
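# --- Request-body sketch for the pattern-anomaly settings endpoint above ---
# The endpoint expects these four keys in the JSON body; the values shown are
# illustrative assumptions, not defaults taken from the original code.
PATTERN_ANOMALY_SETTINGS_EXAMPLE = {
    "groupswitching": 3,           # max allowed cluster hops before flagging an anomaly
    "transitionprobability": 0.1,  # page_threshold used in apiPatternAnomalyPredict
    "transitionsequence": 5,       # chain_count: window size for the probability average
    "sequencethreshold": 0.2,      # chain_probability: minimum acceptable average probability
}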
#@api_view(['POST'])
@csrf_exempt
def apiPatternAnomalyPredict(request):
import pandas as pd
usecaseid = request.GET["usecaseid"]
version = request.GET["version"]
if request.content_type == 'application/json':
model_path = os.path.join(DEPLOY_LOCATION,usecaseid,str(version))
isdir = os.path.isdir(model_path)
if isdir:
try:
data=request.body.decode('utf-8')
data = json.loads(data)
anomaly = False
remarks = ''
clusterid = -1
configfilename = os.path.join(model_path,'datadetails.json')
filename = os.path.join(model_path,'clickstream.json')
clusterfilename = os.path.join(model_path,'stateClustering.csv')
probfilename = os.path.join(model_path,'stateTransitionProbability.csv')
dfclus = pd.read_csv(clusterfilename)
dfprod = pd.read_csv(probfilename)
f = open(configfilename, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
activity = configSettingsJson['activity']
sessionid = configSettingsJson['sessionid']
f = open(filename, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
groupswitching = configSettingsJson['groupswitching']
page_threshold = configSettingsJson['transitionprobability']
chain_count = configSettingsJson['transitionsequence']
chain_probability = configSettingsJson['sequencethreshold']
currentactivity = data[activity]
if bool(user_records):
sessionid = data[sessionid]
if sessionid != user_records['SessionID']:
user_records['SessionID'] = sessionid
prevactivity = ''
                        user_records['probarry'] = []
                        user_records['prevclusterid'] = -1
user_records['NoOfClusterHopping'] = 0
user_records['pageclicks'] = 1
else:
prevactivity = user_records['Activity']
user_records['Activity'] = currentactivity
pageswitch = True
if prevactivity == currentactivity or prevactivity == '':
probability = 0
pageswitch = False
remarks = ''
else:
user_records['pageclicks'] += 1
df1 = dfprod[(dfprod['State'] == prevactivity) & (dfprod['NextState'] == currentactivity)]
if df1.empty:
remarks = 'Anomaly Detected - User in unusual state'
anomaly = True
clusterid = -1
probability = 0
user_records['probarry'].append(probability)
n=int(chain_count)
num_list = user_records['probarry'][-n:]
                            davg = sum(num_list)/len(num_list)  # same name as the other branch so the later threshold check works
for index, row in dfclus.iterrows():
clusterlist = row["clusterlist"]
if currentactivity in clusterlist:
clusterid = row["clusterid"]
else:
probability = df1['Probability'].iloc[0]
user_records['probarry'].append(probability)
n=int(chain_count)
num_list = user_records['probarry'][-n:]
davg = sum(num_list)/len(num_list)
for index, row in dfclus.iterrows():
clusterlist = row["clusterlist"]
if currentactivity in clusterlist:
clusterid = row["clusterid"]
remarks = ''
if user_records['prevclusterid'] != -1:
if probability == 0 and user_records['prevclusterid'] != clusterid:
user_records['NoOfClusterHopping'] = user_records['NoOfClusterHopping']+1
if user_records['pageclicks'] == 1:
remarks = 'Anomaly Detected - Frequent Cluster Hopping'
anomaly = True
else:
remarks = 'Cluster Hopping Detected'
user_records['pageclicks'] = 0
if user_records['NoOfClusterHopping'] > int(groupswitching) and anomaly == False:
remarks = 'Anomaly Detected - Multiple Cluster Hopping'
anomaly = True
elif probability == 0:
remarks = 'Anomaly Detected - Unusual State Transition Detected'
anomaly = True
elif probability <= float(page_threshold):
remarks = 'Anomaly Detected - In-frequent State Transition Detected'
anomaly = True
else:
if pageswitch == True:
if probability == 0:
remarks = 'Anomaly Detected - Unusual State Transition Detected'
anomaly = True
elif probability <= float(page_threshold):
remarks = 'Anomaly Detected - In-frequent State Transition Detected'
anomaly = True
else:
remarks = ''
if davg < float(chain_probability):
if anomaly == False:
remarks = 'Anomaly Detected - In-frequent Pattern Detected'
anomaly = True
else:
user_records['SessionID'] = data[sessionid]
user_records['Activity'] = data[activity]
user_records['probability'] = 0
user_records['probarry'] = []
user_records['chainprobability'] = 0
user_records['prevclusterid'] = -1
user_records['NoOfClusterHopping'] = 0
user_records['pageclicks'] = 1
for index, row in dfclus.iterrows():
clusterlist = row["clusterlist"]
if currentactivity in clusterlist:
clusterid = row["clusterid"]
user_records['prevclusterid'] = clusterid
outputStr = {'status':'SUCCESS','data':{'Anomaly':str(anomaly),'Remarks':str(remarks)}}
return HttpResponse(json.dumps(outputStr),content_type="application/json")
except Exception as e:
print(e)
return HttpResponse(json.dumps({"status":"error","msg":"Bad Request"}),content_type="application/json")
else:
return HttpResponse(json.dumps({"status":"error","msg":"Bad Request"}),content_type="application/json")
else:
msg = help_text(request,usecaseid,version)
        return HttpResponse(msg,content_type="text/plain")
from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
import json
from appbe.dataPath import DEFAULT_FILE_PATH
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appbe.dataPath import DEPLOY_LOCATION
from appbe.pages import getversion
AION_VERSION = getversion()
import os
import time
import subprocess
import sys
import re
import pandas as pd
def mltesting(request):
from appbe.pages import mltesting_page
context = mltesting_page(request)
context['selected']='mltesting'
context['version'] = AION_VERSION
return render(request, 'mltesting.html',context)
def ABtest(request):
try:
if request.method == "POST":
models = request.POST["model"]
data = request.POST["data"]
#context['version'] = AION_VERSION
if(os.path.isfile(models) and os.path.isfile(data)):
AlgorithmNames={'LogisticRegression':'Logistic Regression','SGDClassifier':'Stochastic Gradient Descent','GaussianNB':'Naive Bayes','SVC':'Support Vector Machine','KNeighborsClassifier':'K Nearest Neighbors','DecisionTreeClassifier':'Decision Tree','RandomForestClassifier':'Random Forest','GradientBoostingClassifier':'Gradient Boosting','XGBClassifier':'Extreme Gradient Boosting (XGBoost)','DecisionTreeRegressor':'Decision Tree','LinearRegression':'Linear Regression','Lasso':'Lasso','Ridge':'Ridge','RandomForestRegressor':'Random Forest','XGBRegressor':'Extreme Gradient Boosting (XGBoost)'}
filetimestamp = str(int(time.time()))
mltestjson = os.path.join(DEFAULT_FILE_PATH, 'aion_config.json')
with open(mltestjson, 'r+') as f:
mltest = json.load(f)
f.close()
with open(request.session['MLTestResult'], 'r+') as f:
mltestresult = json.load(f)
f.close()
models = mltestresult['models']
datapath = mltestresult['datap']
featurs = mltestresult['feature']
featurs = featurs.split(",")
tar = mltestresult['target']
tar = tar.split(",")
# models,datap,Problemtype,targ,feature,Problem,Parameters,Accuracy
mltest['basic']['modelName'] = 'MLtest_'+ str(filetimestamp)
mltest['basic']['modelVersion'] = "1"
mltest['basic']['dataLocation'] = mltestresult['datap']
mltest['basic']['deployLocation'] = DEPLOY_LOCATION
mltest['basic']['trainingFeatures'] = mltestresult['feature']
mltest['basic']['targetFeature'] = mltestresult['target']
mltest['advance']['profiler']['featureDict']=[]
temp = {}
Problemtype = mltestresult['Problemtype']
if Problemtype == 'Classification':
Accuracyscore1 = mltestresult['Score']
Accuracyscore = float(Accuracyscore1)*100
temp['ScoringCriteria'] = 'Accuracy'
else:
R2_Score = round(float(mltestresult['Score']),2)
temp['ScoringCriteria'] = 'R2'
baselineparam = mltestresult['Params']
temp['algorithm'] = []
if request.session["AionProblem"] == 'Samebaseline':
baselineprob = AlgorithmNames[mltestresult['ProblemName']]
temp['algorithm'].append(baselineprob)
else:
baselineprob = request.session["AionProblem"]
temp['algorithm'] = baselineprob.split(",")
#print(baselineprob)
temp['ProblemType'] = Problemtype
#temp['algorithm'] = ['K Nearest Neighbors']
problemtyp = mltest['basic']['analysisType']
scoring = mltest['basic']['scoringCriteria']
for i in list(scoring.keys()):
for x in list(mltest['basic']['scoringCriteria'][i].keys()):
mltest['basic']['scoringCriteria'][i][x] = 'False'
if temp['ProblemType'].lower() in ["classification","regression",]:
mltest['basic']['scoringCriteria'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]][temp['ScoringCriteria']] = 'True'
for i in list(problemtyp.keys()):
mltest['basic']['analysisType'][i]='False'
algorithm = mltest['basic']['algorithms']
for i in list(algorithm.keys()):
for x in list(mltest['basic']['algorithms'][i].keys()):
mltest['basic']['algorithms'][i][x] = 'False'
mltest['basic']['analysisType'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]] = 'True'
for X in temp['algorithm']:
mltest['basic']['algorithms'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]][X] = 'True'
mltest = json.dumps(mltest)
config_json_filename = os.path.join(CONFIG_FILE_PATH, 'MLtest' + filetimestamp + '.json')
with open(config_json_filename, "w") as fpWrite:
fpWrite.write(mltest)
fpWrite.close()
scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py'))
outputStr = subprocess.check_output([sys.executable, scriptPath,'-m','training','-c',config_json_filename])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'aion_learner_status:(.*)', str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
decoded_data = json.loads(outputStr)
#print(decoded_data)
if decoded_data['data']['ScoreType'] == 'R2':
decoded_data['data']['BestScore'] = str(round(float(decoded_data['data']['BestScore']),2))
if decoded_data['data']['ScoreType'].lower() == 'accuracy':
decoded_data['data']['BestScore'] = str(round(float(decoded_data['data']['BestScore']),2))
#print(decoded_data)
#print('123',Accuracyscore)
if Problemtype == 'Classification':
if Accuracyscore >= float(decoded_data['data']['BestScore']) :
context = {'modelname': models,'datapath':datapath,'features':featurs,'target':tar,'abtest':'abtest','message':'message','msg':'Existing model is good to be used.','classification':'classification','classuccess':'classuccess','selected':'mltesting','version':AION_VERSION}
else:
context = {'modelname': models,'datapath':datapath,'features':featurs,'target':tar,'tableab':'tableab','abtest':'abtest','decoded_data':decoded_data,'score':Accuracyscore,'basealgo':baselineprob,'Problem':AlgorithmNames[mltestresult['ProblemName']],'baseparm':baselineparam,'classification':'classification','classuccess':'classuccess','selected':'mltesting','version':AION_VERSION}
else:
if R2_Score >= float(decoded_data['data']['BestScore']) :
context = {'modelname': models,'datapath':datapath,'features':featurs,'target':tar,'abtest':'abtest','message':'message','msg':'Existing model is good to be used.','regression':'regression','regsuccess':'regsuccess','selected':'mltesting'}
else:
context = {'modelname': models,'datapath':datapath,'features':featurs,'target':tar,'tableab':'tableab','abtest':'abtest','decoded_data':decoded_data,'score':R2_Score,'basealgo':baselineprob,'Problem':AlgorithmNames[mltestresult['ProblemName']],'baseparm':baselineparam,'regression':'regression','regsuccess':'regsuccess','selected':'mltesting','version':AION_VERSION}
else:
context= {'error':'Error - Model file or Data file does not exist','abtesting':'abtesting','selected':'mltesting'}
context['version'] = AION_VERSION
return render(request, 'mltesting.html', context)
except Exception as e:
print(e)
context= {'error':'Error - Fail to perform A/B Testing','abtesting':'abtesting','selected':'mltesting'}
context['version'] = AION_VERSION
return render(request, 'mltesting.html', context)
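# UQTesting: runs uncertainty quantification (aion_uq) on the selected model and
# dataset, then renders a regression or classification UQ report. The 'Good'/'Bad'
# message returned by the UQ run is mapped to a colour flag used by the template.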
def UQTesting(request):
try:
if request.method == "POST":
models = request.POST['modeluq']
datap = request.POST['datauq']
if(os.path.isfile(models) and os.path.isfile(datap)):
df = pd.read_csv(datap)
trainfea = df.columns.tolist()
featurs = request.POST.getlist('Traininguq')
feature = ",".join(featurs)
# features = ['PetalLengthCm','PetalWidthCm']
targ = request.POST['Targetuq']
tar =[targ]
from bin.aion_uncertainties import aion_uq
outputStr = aion_uq(models,datap,feature,tar)
print(outputStr)
uq_test = json.loads(outputStr)
#print("==============")
#print(uq_test)
#print("==============")
Problemtype= uq_test['Problem']
msg = uq_test['msg']
if Problemtype == 'Regression':
# Confidence_Interval_Plot = uq_test['Confidence Interval Plot']
# #print(Confidence_Interval_Plot)
# if Confidence_Interval_Plot != '':
# string = base64.b64encode(open(Confidence_Interval_Plot, "rb").read())
# Confidence_Interval_Plot = 'data:image/png;base64,' + urllib.parse.quote(string)
# PICP_Plot = uq_test['PICP Plot']
# if PICP_Plot != '':
# string = base64.b64encode(open(PICP_Plot, "rb").read())
# PICP_Plot = 'data:image/png;base64,' + urllib.parse.quote(string)
# Confidence_Plot = uq_test['Confidence Plot']
# if Confidence_Plot != '':
# string = base64.b64encode(open(Confidence_Plot, "rb").read())
# Confidence_Plot = 'data:image/png;base64,' + urllib.parse.quote(string)
if msg == "Good":
context={'Uqtest':'Uqtest','regression':'regression','modelname':models,'datapath':datap,'features':featurs,'target':tar,'trainfea':trainfea,'uq_reg':uq_test,'uqregression':'uqregression','dfuqr':df,'Green':'Green','selected':'mllite','version':AION_VERSION}
elif msg == "Bad":
context={'Uqtest':'Uqtest','regression':'regression','modelname':models,'datapath':datap,'features':featurs,'target':tar,'trainfea':trainfea,'uq_reg':uq_test,'uqregression':'uqregression','dfuqr':df,'Red':'Red','selected':'mllite','version':AION_VERSION}
else:
context={'Uqtest':'Uqtest','regression':'regression','modelname':models,'datapath':datap,'features':featurs,'target':tar,'trainfea':trainfea,'uq_reg':uq_test,'uqregression':'uqregression','dfuqr':df,'orange':'orange','selected':'mllite','version':AION_VERSION}
elif Problemtype == 'Classification':
# df3 = pd.DataFrame.from_dict(uq_test,orient='index')
df3 = pd.DataFrame.from_dict(uq_test, orient='index', columns=['value'])
df3 = df3.rename_axis('Attributes').reset_index()
# risk_plot = uq_test['Risk Plot']
# if risk_plot != '':
# string = base64.b64encode(open(risk_plot, "rb").read())
# risk_plot = 'data:image/png;base64,' + urllib.parse.quote(string)
# reliability_plot = uq_test['Reliability Plot']
# if reliability_plot != '':
# string = base64.b64encode(open(reliability_plot, "rb").read())
# reliability_plot = 'data:image/png;base64,' + urllib.parse.quote(string)
df3['Attributes'] = df3['Attributes'].str.replace(r'_', ' ')
# df3.loc[(df3.Attributes == 'Model_total_confidence_score'),'Attributes']='Model Total Confidence'
# df3.loc[(df3.Attributes == 'Expected_Calibration_Error'),'Attributes']='Expected Calibration Error'
df3 = df3.iloc[4:, :]
report = df3.to_html(index=False)
if msg == "Good":
context={'Uqtest':'Uqtest','classification':'classification','modelname':models,'datapath':datap,'features':featurs,'target':tar,'uqclassification':'uqclassification','uq_class':uq_test,'report':report,'selected':'mllite','version':AION_VERSION}
elif msg == "Bad":
context={'Uqtest':'Uqtest','classification':'classification','modelname':models,'datapath':datap,'features':featurs,'target':tar,'uqclassification':'uqclassification','uq_class':uq_test,'report':report,'selected':'mllite','version':AION_VERSION}
else:
context={'Uqtest':'Uqtest','classification':'classification','modelname':models,'datapath':datap,'features':featurs,'target':tar,'uqclassification':'uqclassification','uq_class':uq_test,'report':report,'selected':'mllite','version':AION_VERSION}
elif Problemtype == 'None':
#print('hi')
context={'Uqerror':'Uqerror','errormsg':"Error:"+str(msg),'version':AION_VERSION}
else:
context= {'error':'Please enter valid inputs','UQtesting':'UQtesting','selected':'mllite','version':AION_VERSION}
return render(request, 'mltesting.html', context)
except Exception as e:
print("uqregression error: ",e)
context= {'error':'Error - Fail to perform Uncertainty Quantification ','UQtesting':'UQtesting','selected':'mllite','version':AION_VERSION}
return render(request, 'mltesting.html', context)
from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
from appbe.pages import getusercasestatus
from appbe.pages import getversion
AION_VERSION = getversion()
from appfe.modelTraining.models import usecasedetails
from appfe.modelTraining.models import Existusecases
import os
from django.db.models import Max, F
import pandas as pd
from appbe.publish import check_input_data
from appbe.dataPath import DEFAULT_FILE_PATH
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appbe.dataPath import DEPLOY_LOCATION
from appbe import installPackage
import json
from appbe import compute
from appbe.training import checkModelUnderTraining
import logging
import time # used by retrain() when writing timestamped retraining data files
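# opentraininglogs: resolves the model record for the given use case id and version,
# restores its config, deploy path, log file path and data settings into the session,
# and hands control to the data upload view so the earlier training run can be reviewed.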
def opentraininglogs(request, id,currentVersion):
from appbe.pages import usecases_page
try:
from appbe import installPackage
modelID = installPackage.getMIDFromUseCaseVersion(id,currentVersion,usecasedetails,Existusecases)
p = Existusecases.objects.get(id=modelID)
configpath = str(p.ConfigPath)
file_exists = os.path.exists(configpath)
if not file_exists:
request.session['IsRetraining'] = 'No'
status,context,action = usecases_page(request,usecasedetails,Existusecases)
context['errorMsg'] = 'Error in model launching: Some of the files are missing'
return render(request,action,context)
usecasename = p.ModelName.UsecaseName
Version = p.Version
request.session['ModelName'] = p.ModelName.id
request.session['UseCaseName'] = usecasename
request.session['usecaseid'] = p.ModelName.usecaseid
request.session['ModelVersion'] = p.Version
request.session['deploypath'] = str(p.DeployPath)
request.session['config_json'] = configpath
usename = request.session['usecaseid'].replace(" ", "_")
request.session['logfilepath'] = os.path.join(DEPLOY_LOCATION,usename,str(request.session['ModelVersion']),'log','model_training_logs.log')
request.session['finalstate'] = 3
request.session['ModelStatus'] = p.Status
updatedConfigFile = request.session['config_json']
f = open(updatedConfigFile, "r")
configSettings = f.read()
f.close()
configSettings = json.loads(configSettings)
problemtypes = configSettings['basic']['analysisType']
#print(problemtypes.keys())
problem_type = ""
for k in problemtypes.keys():
if configSettings['basic']['analysisType'][k] == 'True':
problem_type = k
break
if problem_type.lower() in ['videoforecasting','imageclassification','objectdetection','document','llmfinetuning']:
request.session['datatype'] = configSettings['basic']['folderSettings']['fileType']
request.session['csvfullpath'] = configSettings['basic']['folderSettings']['labelDataFile']
request.session['datalocation'] = configSettings['basic']['dataLocation']
if problem_type.lower() == 'llmfinetuning':
request.session['fileExtension'] = configSettings['basic']['folderSettings']['fileExtension']
else:
request.session['datalocation'] = str(p.DataFilePath)
request.session['datatype'] = 'Normal'
if 'fileSettings' in configSettings['basic'].keys():
fileSettings = configSettings['basic']['fileSettings']
if 'delimiters' in fileSettings.keys():
delimiters = configSettings['basic']['fileSettings']['delimiters']
textqualifier = configSettings['basic']['fileSettings']['textqualifier']
request.session['delimiter'] = delimiters
request.session['textqualifier'] = textqualifier
else:
request.session['delimiter'] = ','
request.session['textqualifier'] = '"'
from appfe.modelTraining.views import uploaddata
return uploaddata(request)
except Exception as e:
print(e)
return render(request, 'usecases.html',{'error': 'Failed to launch model. Please train the model first before launching.','selected': 'prediction','version':AION_VERSION})
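# retrain: prepares the next model version for a use case. It reloads the previous
# version's config and data, appends any newly published input data, samples large
# datasets for EDA, records a new Existusecases entry and renders the upload page
# pre-filled for retraining.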
def retrain(request, id,currentVersion):
from appbe.aion_config import eda_setting
from appbe.pages import usecases_page
from appbe.aion_config import settings
usecasetab = settings()
try:
p = usecasedetails.objects.get(id=id)
s1 = Existusecases.objects.filter(ModelName=id).annotate(maxver=Max('ModelName__existusecases__Version'))
config_list = s1.filter(Version=F('maxver'))
if config_list.count() > 0:
Version = config_list[0].Version
Version = Version + 1
else:
Version = 1
usecasename = p.UsecaseName
request.session['ModelName'] = p.id
request.session['UseCaseName'] = usecasename
request.session['usecaseid'] = p.usecaseid
request.session['ModelVersion'] = Version
request.session['ModelStatus'] = 'Not Trained'
request.session['finalstate'] = 0
usecase = usecasedetails.objects.all().order_by('-id')
# Retraing settings changes
# -------- S T A R T --------
model = Existusecases.objects.filter(ModelName=p,Version=currentVersion)
samplePercentage = 100
samplePercentval = 0
showRecommended = False
if(model.count() > 0):
indexVal = 0
configfile = str(model[indexVal].ConfigPath)
f = open(configfile, "r")
configSettings = f.read()
f.close()
configSettings = json.loads(configSettings)
dataFile = configSettings['basic']['dataLocation']
if configSettings['basic']['folderSettings']['fileType'] == 'Object':
request.session['datatype'] = configSettings['basic']['folderSettings']['fileType']
request.session['objectLabelFileName'] = configSettings['basic']['folderSettings']['labelDataFile']
request.session['datalocation'] = configSettings['basic']['dataLocation']
return objectlabeldone(request)
else:
request.session['datalocation'] = str(configSettings['basic']['dataLocation'])
request.session['datatype'] = 'Normal'
if 'fileSettings' in configSettings['basic'].keys():
fileSettings = configSettings['basic']['fileSettings']
if 'delimiters' in fileSettings.keys():
delimiters = configSettings['basic']['fileSettings']['delimiters']
textqualifier = configSettings['basic']['fileSettings']['textqualifier']
request.session['delimiter'] = delimiters
request.session['textqualifier'] = textqualifier
else:
request.session['delimiter'] = ','
request.session['textqualifier'] = '"'
df = pd.read_csv(dataFile, encoding='utf8',nrows=10,encoding_errors= 'replace')
records = df.shape[0]
df1 = check_input_data(usecasename)
if df1.shape[0] > 0:
df = pd.read_csv(dataFile, encoding='utf8',encoding_errors= 'replace')
df = pd.concat([df, df1], ignore_index=True)
df = df.reset_index(drop=True)
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
df.to_csv(dataFile, index=False)
print(df.shape[0])
request.session['datalocation'] = str(dataFile)
request.session['NoOfRecords'] = records
request.session['IsRetraining'] = 'Yes'
df_top = df.head(10)
df_json = df_top.to_json(orient="records")
df_json = json.loads(df_json)
# from AION import ux_eda
# eda_obj = ux_eda(dataFile)
# featuresList,datetimeFeatures,sequenceFeatures,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature = eda_obj.getFeatures()
featuresList = df.columns.tolist()
numberoffeatures = len(featuresList)
from appfe.modelTraining.views import getimpfeatures
imp_features = getimpfeatures(dataFile,numberoffeatures)
check_df = pd.read_csv(dataFile,encoding='utf8',encoding_errors= 'replace')
# EDA Performance change
# ----------------------------
sample_size = int(eda_setting())
# dflength = len(eda_obj.getdata())
dflength = len(check_df)
if dflength > sample_size:
samplePercentage = int((sample_size/dflength) * 100)
samplePercentval = samplePercentage / 100
showRecommended = True
# ----------------------------
statusmsg = 'Data loaded Successfully for Retraining.'
computeinfrastructure = compute.readComputeConfig()
# ----------------------------
selected_use_case = request.session['UseCaseName']
ModelVersion = Version
ModelStatus = 'Not Trained'
if len(usecase) > 0:
nouc = usecasedetails.objects.latest('id')
nouc = (nouc.id)+1
else:
nouc = 1
# Retraing settings changes
# -------- S T A R T --------
# return render(request, 'usecases.html', {'usecasedetail': usecase,'nouc':nouc,'models': models, 'selectedusecase': usecasename,
# 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,
# 'ModelVersion': ModelVersion, 'selected': 'usecase'})
ps = Existusecases(DataFilePath=request.session['datalocation'], DeployPath='', Status='Not Trained',ConfigPath=configfile, Version=Version, ModelName=p, TrainOuputLocation='')
ps.save()
if(model.count() > 0):
context = {'range':range(1,101),'samplePercentage':samplePercentage, 'samplePercentval':samplePercentval, 'showRecommended':showRecommended,'featuresList': featuresList, 'tab': 'tabconfigure','data': df_json,'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning','exploratory':False, 'status_msg': statusmsg,'computeinfrastructure':computeinfrastructure,'IsRetrainingModel':True,'imp_features':imp_features,'numberoffeatures':numberoffeatures, 'dataSetPath': dataFile,'usecasetab':usecasetab,'finalstate':request.session['finalstate'],'version':AION_VERSION}
else:
context = {'tab': 'tabconfigure','selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'Modelretrain':request.session['ModelVersion'],'finalstate':request.session['finalstate'],'version':AION_VERSION}
context['version'] = AION_VERSION
return render(request, 'upload.html', context)
except Exception as e:
print(e)
checkModelUnderTraining(request,usecasedetails,Existusecases)
request.session['IsRetraining'] = 'No'
status,context,action = usecases_page(request,usecasedetails,Existusecases)
#print(context)
context['version'] = AION_VERSION
context['Status'] = 'Error'
context['Msg'] = 'Error in retraining usecase. Check log file for more details'
return render(request,action,context)
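# launchmodel: restores a trained use case version into the session (config, deploy
# path, data location, file settings) and forwards the request to the Prediction view.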
def launchmodel(request, id,version):
from appbe.pages import usecases_page
try:
modelID = installPackage.getMIDFromUseCaseVersion(id,version,usecasedetails,Existusecases)
p = Existusecases.objects.get(id=modelID)
configpath = str(p.ConfigPath)
file_exists = os.path.exists(configpath)
if not file_exists:
request.session['IsRetraining'] = 'No'
status,context,action = usecases_page(request,usecasedetails,Existusecases)
context['errorMsg'] = 'Error in model launching: Some of the files are missing'
return render(request,action,context)
usecasename = p.ModelName.UsecaseName
Version = p.Version
request.session['ModelName'] = p.ModelName.id
request.session['UseCaseName'] = usecasename
request.session['usecaseid'] = p.ModelName.usecaseid
request.session['ModelVersion'] = p.Version
request.session['deploypath'] = str(p.DeployPath)
request.session['config_json'] = configpath
usename = request.session['usecaseid'].replace(" ", "_")
request.session['logfilepath'] = os.path.join(DEPLOY_LOCATION,usename,str(request.session['ModelVersion']),'log','model_training_logs.log')
request.session['finalstate'] = 3
request.session['ModelStatus'] = p.Status
updatedConfigFile = request.session['config_json']
f = open(updatedConfigFile, "r")
configSettings = f.read()
f.close()
configSettings = json.loads(configSettings)
problemtypes = configSettings['basic']['analysisType']
#print(problemtypes.keys())
problem_type = ""
for k in problemtypes.keys():
if configSettings['basic']['analysisType'][k] == 'True':
problem_type = k
break
if problem_type == 'videoForecasting' or problem_type == 'imageClassification' or problem_type == 'objectDetection':
request.session['datatype'] = configSettings['basic']['folderSettings']['fileType']
request.session['csvfullpath'] = configSettings['basic']['folderSettings']['labelDataFile']
request.session['datalocation'] = configSettings['basic']['dataLocation']
elif configSettings['basic']['folderSettings']['fileType'] == 'Document':
request.session['datatype'] = configSettings['basic']['folderSettings']['fileType']
request.session['csvfullpath'] = configSettings['basic']['folderSettings']['labelDataFile']
request.session['datalocation'] = configSettings['basic']['dataLocation']
else:
request.session['datalocation'] = str(p.DataFilePath)
request.session['datatype'] = 'Normal'
if 'fileSettings' in configSettings['basic'].keys():
fileSettings = configSettings['basic']['fileSettings']
if 'delimiters' in fileSettings.keys():
delimiters = configSettings['basic']['fileSettings']['delimiters']
textqualifier = configSettings['basic']['fileSettings']['textqualifier']
request.session['delimiter'] = delimiters
request.session['textqualifier'] = textqualifier
else:
request.session['delimiter'] = ','
request.session['textqualifier'] = '"'
from appfe.modelTraining.prediction_views import Prediction
return Prediction(request)
except Exception as e:
print(e)
return render(request, 'prediction.html',{'error': 'Failed to launch model. Please train the model first before launching.','selected': 'prediction','version':AION_VERSION})
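# modxplain: entry point for model explainability; resolves the use case version,
# populates the session with its config and deploy paths, and delegates to
# visualizer_views.xplain.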
def modxplain(request, id,version):
from appbe.pages import usecases_page
log = logging.getLogger('log_ux')
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request) # needed by the error log below
modelID = installPackage.getMIDFromUseCaseVersion(id,version,usecasedetails,Existusecases)
p = Existusecases.objects.get(id=modelID)
configpath = str(p.ConfigPath)
usecasename = p.ModelName.UsecaseName
Version = p.Version
request.session['ModelName'] = p.ModelName.id
request.session['UseCaseName'] = usecasename
request.session['usecaseid'] = p.ModelName.usecaseid
request.session['ModelVersion'] = p.Version
request.session['deploypath'] = str(p.DeployPath)
request.session['config_json'] = configpath
usename = request.session['usecaseid'].replace(" ", "_")
request.session['logfilepath'] = os.path.join(DEPLOY_LOCATION,usename,str(request.session['ModelVersion']),'log','model_training_logs.log')
request.session['finalstate'] = 3
request.session['ModelStatus'] = p.Status
file_exists = os.path.exists(configpath)
if not file_exists:
request.session['IsRetraining'] = 'No'
status,context,action = usecases_page(request,usecasedetails,Existusecases)
context['errorMsg'] = 'Error in model launching: Some of the files are missing'
log.info('modxplain:' + str(selected_use_case) + ':' + str(ModelVersion) + ':' + '0 ' + 'sec' + ':' + 'Error:Error in model launching: Some of the files are missing')
return render(request,action,context)
usecasename = p.ModelName.UsecaseName
Version = p.Version
request.session['ModelName'] = p.ModelName.id
request.session['UseCaseName'] = usecasename
request.session['usecaseid'] = p.ModelName.usecaseid
request.session['ModelVersion'] = p.Version
request.session['deploypath'] = str(p.DeployPath)
request.session['config_json'] = configpath
usename = request.session['usecaseid'].replace(" ", "_")
request.session['logfilepath'] = os.path.join(DEPLOY_LOCATION,usename,str(request.session['ModelVersion']),'log','model_training_logs.log')
request.session['finalstate'] = 3
request.session['ModelStatus'] = p.Status
from appfe.modelTraining import visualizer_views as v
return v.xplain(request)
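# moddrift: entry point for drift monitoring; loads the selected version's config to
# recover the training data location, stores it in the session and calls inputdrift.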
def moddrift(request, id,version):
from appbe.pages import usecases_page
modelID = installPackage.getMIDFromUseCaseVersion(id,version,usecasedetails,Existusecases)
p = Existusecases.objects.get(id=modelID)
configpath = str(p.ConfigPath)
file_exists = os.path.exists(configpath)
if not file_exists:
request.session['IsRetraining'] = 'No'
status,context,action = usecases_page(request,usecasedetails,Existusecases)
context['errorMsg'] = 'Error in model launching: Some of the files are missing'
return render(request,action,context)
usecasename = p.ModelName.UsecaseName
Version = p.Version
request.session['ModelName'] = p.ModelName.id
request.session['UseCaseName'] = usecasename
request.session['usecaseid'] = p.ModelName.usecaseid
request.session['ModelVersion'] = p.Version
request.session['deploypath'] = str(p.DeployPath)
request.session['config_json'] = configpath
usename = request.session['usecaseid'].replace(" ", "_")
request.session['logfilepath'] = os.path.join(DEPLOY_LOCATION,usename,str(request.session['ModelVersion']),'log','model_training_logs.log')
request.session['finalstate'] = 3
request.session['ModelStatus'] = p.Status
f = open( configpath, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
trainingdataloc = configSettingsJson['basic']['dataLocation']
request.session['datalocation']= trainingdataloc
return inputdrift(request)
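# inputdrift: renders the monitoring page with the input-drift and performance
# service URLs for the current use case and version. Drift analysis is only offered
# for classification and regression problems and requires a successfully trained model.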
def inputdrift(request):
log = logging.getLogger('log_ux')
from appbe.aion_config import settings
usecasetab = settings()
from appbe import service_url
try:
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
computeinfrastructure = compute.readComputeConfig()
if ModelStatus != 'SUCCESS':
context = {'error': 'Please train the model first or launch an existing trained model', 'selected_use_case': selected_use_case,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'monitoring','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab}
log.info('Error Please train the model first or launch an existing trained model')
else:
updatedConfigFile = request.session['config_json']
f = open(updatedConfigFile, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
problemtypes = configSettingsJson['basic']['analysisType']
problem_type = ""
for k in problemtypes.keys():
if configSettingsJson['basic']['analysisType'][k] == 'True':
problem_type = k
break
problem = problem_type
ser_url = service_url.read_monitoring_service_url_params(request)
iterName = request.session['usecaseid'].replace(" ", "_")
ModelVersion = request.session['ModelVersion']
ser_url = ser_url+'monitoring?usecaseid='+iterName+'&version='+str(ModelVersion)
pser_url = service_url.read_performance_service_url_params(request)
pser_url = pser_url+'performance?usecaseid='+iterName+'&version='+str(ModelVersion)
if problem.lower() not in ['classification','regression']:
context = {'error': 'Input drift only available for classification and regression problems', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'version':AION_VERSION,
'ModelVersion': ModelVersion, 'selected': 'monitoring','ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab}
else:
context = {'SUCCESS': 'Model is trained', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'version':AION_VERSION,
'ModelVersion': ModelVersion, 'selected': 'monitoring','ser_url':ser_url,'pser_url':pser_url,'trainingDataLocation':request.session['datalocation'],'computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab}
return render(request, 'inputdrif.html', context)
except Exception as e:
print(e)
log.info('inputdrift; Error: Failed to perform drift analysis'+str(e))
return render(request, 'inputdrif.html', {'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion, 'selected': 'monitoring','computeinfrastructure':computeinfrastructure,'version':AION_VERSION,'error':'Fail to do inputdrift analysis','usecasetab':usecasetab})
import os,sys
import re
import logging
from django.http import HttpRequest, HttpResponse
from django.conf import settings
from django.shortcuts import render
from appbe.pages import getversion
import plotly.graph_objects as go
import plotly.figure_factory as ff
from django.shortcuts import render
from plotly.subplots import make_subplots
from django.contrib.sessions.models import Session
from sklearn.metrics import confusion_matrix
from IPython.core.display import HTML
from IPython.core import display
from django.template import Context, loader
import pandas as pd
import numpy as np
import io
import urllib, base64
from natsort import natsorted
import matplotlib.pyplot as plt
import plotly.express as px
import json
from IPython.core.display import display, HTML
from appbe import compute
import base64
import warnings
warnings.filterwarnings('ignore')
import subprocess
from appbe import installPackage
from appfe.modelTraining.models import usecasedetails
from appfe.modelTraining.models import Existusecases
from utils.file_ops import read_df_compressed
from appbe.dataPath import LOG_LOCATION
from appbe.log_ut import logg
import time
AION_VERSION = getversion()
def getusercasestatus(request):
if 'UseCaseName' in request.session:
selected_use_case = request.session['UseCaseName']
else:
selected_use_case = 'Not Defined'
if 'ModelVersion' in request.session:
ModelVersion = request.session['ModelVersion']
else:
ModelVersion = 0
if 'ModelStatus' in request.session:
ModelStatus = request.session['ModelStatus']
else:
ModelStatus = 'Not Trained'
return selected_use_case,ModelVersion,ModelStatus
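# xplain: on GET, renders the Explainable AI (business view) page with sample input
# fields taken from the training data; only classification and regression are
# supported. On POST, dispatches to modelxplain (global explanation) or
# predictionxplain (single-prediction explanation).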
def xplain(request):
log = logging.getLogger('log_ux')
computeinfrastructure = compute.readComputeConfig()
from appbe.aion_config import settings
usecasetab = settings()
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
if request.method == 'GET':
try:
if ModelStatus != 'SUCCESS':
log.info('xplain :' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error :Please train the model first or launch an existing trained model')
return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please train the model first or launch an existing trained model','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
else:
if 'ModelVersion' not in request.session:
log.info('xplain : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error:Please train the model first')
return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please train the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
elif request.session['ModelVersion'] == 0:
log.info('xplain : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please train the model first')
return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please train the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
elif 'ModelStatus' not in request.session:
log.info('xplain : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please train the model first')
return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please train the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
elif request.session['ModelStatus'] != 'SUCCESS':
log.info('xplain : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please train the model first')
return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please train the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
updatedConfigFile = request.session['config_json']
f = open(updatedConfigFile, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
problemType = 'classification'
for key in configSettingsJson['basic']['analysisType']:
if configSettingsJson['basic']['analysisType'][key] == 'True':
problemType = key
break
if problemType.lower() != 'classification' and problemType.lower() != 'regression':
log.info('xplain:' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + 'sec' + ' : ' + 'Error:Explainable AI only available for classification and regression problem')
return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Explainable AI only available for classification and regression problem','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
displaypath = os.path.join( request.session['deploypath'],'etc','display.json')
with open(displaypath) as file:
config = json.load(file)
file.close()
inputFeatures = configSettingsJson['basic']['trainingFeatures']
targetFeature = configSettingsJson['basic']['targetFeature']
inputFeaturesList = inputFeatures.split(',')
if targetFeature in inputFeaturesList:
inputFeaturesList.remove(targetFeature)
dataFilePath = str(configSettingsJson['basic']['dataLocation'])
df = pd.read_csv(dataFilePath,nrows=10)
df.rename(columns=lambda x: x.strip(), inplace=True)
df = df[inputFeaturesList]
inputFieldsDict = df.to_dict(orient='index')[5]
inputFields = []
inputFields.append(inputFieldsDict)
targetfeatures = targetFeature.split(",")
##### Bug 20649 starts
featureDict = configSettingsJson['advance']['profiler']['featureDict']
catFeatureList = []
for feature in featureDict:
if feature['type'] == 'categorical':
catFeatureList.append(feature['feature'])
for feature in targetfeatures:
if feature in catFeatureList:
catFeatureList.remove(feature)
fairness_error = "" if len(catFeatureList)>0 else "Fairness metrics are not applicable as no categorical feature is present."
##### Bug 20649 ends
context = {"fairness_error":fairness_error,"catFeatureList":catFeatureList,'selected_use_case':selected_use_case,'configSettings':configSettingsJson,'targetfeatures':targetfeatures,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'selected':'visualizer','subselected':'businessview','inputFields':inputFields,'computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION,'problemType':problemType}
return render(request, 'businessview.html', context)
except Exception as e:
log.info('xplain : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Unexpected error occur, '+str(e))
print(e)
return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Unexpected error occur','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
if request.method == 'POST':
if request.POST.get("submit") == 'modelxplain':
return modelxplain(request)
if request.POST.get("submit") == 'xplainprediction':
return predictionxplain(request)
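# modelxplain: runs aion_xai.py in 'global' mode for the deployed model, parses the
# returned JSON (ALE view, summary sentences, feature importance, anchors) and renders
# it together with dataset statistics and a Plotly feature-importance bar chart.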
def modelxplain(request):
log = logging.getLogger('log_ux')
computeinfrastructure = compute.readComputeConfig()
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
from appbe.aion_config import settings
usecasetab = settings()
t1 = time.time()
if 'UseCaseName' not in request.session:
return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please create the use case first, train the model and then visualize the data','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
else:
if 'ModelVersion' not in request.session:
log.info('Xplain Model : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please train the model first')
return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please train the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
elif request.session['ModelVersion'] == 0:
log.info('Xplain Model : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please train the model first')
return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please train the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
elif 'ModelStatus' not in request.session:
log.info('Xplain Model : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please train the model first')
return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please train the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
elif request.session['ModelStatus'] != 'SUCCESS':
log.info('Xplain Model : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please train the model first')
return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Please train the model first','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
try:
from appbe.telemetry import UpdateTelemetry
UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes')
updatedConfigFile = request.session['config_json']
f = open(updatedConfigFile, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
problemType = 'classification'
for key in configSettingsJson['basic']['analysisType']:
if configSettingsJson['basic']['analysisType'][key] == 'True':
problemType = key
break
if problemType.lower() != 'classification' and problemType.lower() != 'regression':
log.info('Xplain Model : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Explainable AI only available for classification and regression problem')
return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error':'Explainable AI only available for classification and regression problem','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
displaypath = os.path.join( request.session['deploypath'],'etc','display.json')
with open(displaypath) as file:
config = json.load(file)
file.close()
targetFeature = configSettingsJson['basic']['targetFeature']
dataFilePath = str(configSettingsJson['basic']['dataLocation'])
status, df = read_df_compressed(config['postprocessedData'], nrows=10)
df.rename(columns=lambda x: x.strip(), inplace=True)
if targetFeature in df.columns:
df.drop( targetFeature, axis=1, inplace=True)
inputFieldsDict = df.to_dict(orient='index')[5]
inputFields = []
inputFields.append(inputFieldsDict)
if 'nrows' in config:
nrows = config['nrows']
else:
nrows = 'Not Available'
if 'ncols' in config:
ncols = config['ncols']
else:
ncols = 'Not Available'
if 'targetFeature' in config:
targetFeature = config['targetFeature']
else:
targetFeature = ''
labelMaps = config['labelMaps']
modelfeatures = configSettingsJson['basic']['trainingFeatures'].split(',')
mfcount = len(modelfeatures)
dataFilePath = str(configSettingsJson['basic']['dataLocation'])
df_proprocessed = pd.read_csv(dataFilePath,nrows=1000)
df_proprocessed.rename(columns=lambda x: x.strip(), inplace=True)
if targetFeature != '':
target_classes = df_proprocessed[targetFeature].unique()
numberofclasses = len(target_classes)
else:
target_classes = []
numberofclasses = 'Not Available'
dataPoints = df_proprocessed.shape[0]
df_proprocessed = df_proprocessed.head(5)
df_proprocessed = df_proprocessed.to_json(orient="records")
df_proprocessed = json.loads(df_proprocessed)
expainableAIPath = os.path.join(request.session['deploypath'],'aion_xai.py')
outputStr = subprocess.check_output([sys.executable,expainableAIPath,'global'])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'aion_ai_explanation:(.*)',str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
ale_json = json.loads(str(outputStr))
ale_json = ale_json['data']
ale_view = ale_json['data']
sentences = ale_json['sentences']
scoreMessage = ''
feature_importance = ale_json['feature_importance']
dfimp = pd.DataFrame.from_dict(feature_importance)
dfimp = dfimp.sort_values(by=['values'],ascending=False).reset_index()
yaxis_data = dfimp['values'].tolist()
xaxis_data = dfimp['labels'].tolist()
cfig = go.Figure()
cfig.add_trace(go.Bar(x=xaxis_data,y=yaxis_data,name='Feature Importance'))
cfig.update_layout(barmode='stack',xaxis_title='Features')
bargraph = cfig.to_html(full_html=False, default_height=450,default_width=1000)
dftoprecords = dfimp.head(2)
topTwoFeatures = dfimp['labels'].tolist()
topFeaturesMsg = []
for i in range(0,len(dfimp)):
value = round(dfimp.loc[i, "values"],2)*100
value = round(value,2)
tvalue = str(dfimp.loc[i, "labels"])+' contributing to '+ str(value)+'%'
topFeaturesMsg.append(tvalue)
most_influencedfeature = ale_json['most_influencedfeature']
interceppoint = ale_json['interceptionpoint']
anchorjson = ale_json['anchorjson']
t2 = time.time()
context = {'ale_view':ale_view,'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'selected':'visualizer','subselected':'businessview','sentences':sentences,"bargraph":bargraph,'inputFields':inputFields,'nrows':nrows,'ncols':ncols,'targetFeature':targetFeature,'dataPoints':dataPoints,'target_classes':target_classes,'datarows':df_proprocessed,'numberofclasses':numberofclasses,'modelfeatures':modelfeatures,'problemType':problemType,'mfcount':mfcount,'topTwoFeatures':topTwoFeatures,'topFeaturesMsg':topFeaturesMsg,'most_influencedfeature':most_influencedfeature,'interceppoint':interceppoint,'achors':anchorjson,'labelMaps':labelMaps,'computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION}
log.info('Xplain Model : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + str(round(t2 - t1)) + ' sec' + ' : ' + 'Success')
return render(request, 'businessview.html', context)
except Exception as Inst:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
print(Inst)
log.info('Xplain Model : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Failed to Xplain Model, '+str(Inst))
log.info('Details : '+str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
return render(request,'businessview.html',{'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'error1':'Failed to Xplain Model','selected':'visualizer','subselected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION})
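# predictionxplain: builds the input record from the POST data (falling back to a
# sample row of the training data), runs aion_xai.py in 'local' mode and renders the
# prediction with its anchor rule, precision, coverage and force/decision/waterfall plots.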
def predictionxplain(request):
log = logging.getLogger('log_ux')
from appbe.aion_config import settings
usecasetab = settings()
computeinfrastructure = compute.readComputeConfig()
selected_use_case, ModelVersion, ModelStatus = getusercasestatus(request)
try:
from appbe.telemetry import UpdateTelemetry
UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'TrustedAI','Yes')
t1 = time.time()
displaypath = os.path.join( request.session['deploypath'],'etc','display.json')
with open(displaypath) as file:
config = json.load(file)
file.close()
updatedConfigFile = request.session['config_json']
f = open(updatedConfigFile, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
inputFeatures = configSettingsJson['basic']['trainingFeatures']
targetFeature = configSettingsJson['basic']['targetFeature']
inputFeaturesList = inputFeatures.split(',')
if targetFeature in inputFeaturesList:
inputFeaturesList.remove(targetFeature)
inputFieldsDict = {}
problemType = 'classification'
for key in configSettingsJson['basic']['analysisType']:
if configSettingsJson['basic']['analysisType'][key] == 'True':
problemType = key
break
if problemType.lower() == 'timeseriesforecasting': #task 11997
inputFieldsDict['noofforecasts'] = request.POST.get('noofforecasts')
elif problemType == 'RecommenderSystem':
inputFieldsDict['uid'] = request.POST.get('uid')
inputFieldsDict['iid'] = request.POST.get('iid')
inputFieldsDict['rating'] = request.POST.get('rating')
else:
for feature in inputFeaturesList:
try:
dataFilePath = str(configSettingsJson['basic']['dataLocation'])
df = pd.read_csv(dataFilePath,nrows=10)
df.rename(columns=lambda x: x.strip(), inplace=True)
df = df[inputFeaturesList]
inputFieldsDict = df.to_dict(orient='index')[5]
except:
inputFieldsDict[feature] = request.POST.get(feature)
for key, value in inputFieldsDict.items():
if value == 'nan':
inputFieldsDict[key] = ''
inputFieldsJson = json.dumps(inputFieldsDict)
expainableAIPath = os.path.join(request.session['deploypath'],'aion_xai.py')
#print(inputFieldsJson)
outputStr = subprocess.check_output([sys.executable,expainableAIPath,'local',inputFieldsJson])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'aion_ai_explanation:(.*)',str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
#print(outputStr)
predict_dict = json.loads(str(outputStr))
if (predict_dict['status'] == 'SUCCESS'):
predict_dict = predict_dict['data']
prediction = predict_dict['prediction']
anchor = predict_dict['anchor']
precision = predict_dict['precision']
coverage = round(predict_dict['coverage'],2)
confidence = '95%'
forceplot_view = predict_dict['forceplot']
multidecisionplot_view = predict_dict['multidecisionplot']
waterfallplot_view = predict_dict['waterfallplot'] #Task12581
else:
context={'tab':'tabconfigure','error':'Failed to xplain','selected':'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION,'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion}
log.info('Xplain Prediction : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Failed to xplain')
return render(request,'businessview.html',context)
inputFields = []
inputFields.append(inputFieldsDict)
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
context={'tab' : 'predict','inputFields':inputFields,'prediction':prediction,'reason':anchor, 'precision': precision,'coverage':coverage,'confidence':confidence,'forceplot_view':forceplot_view,'multidecisionplot_view':multidecisionplot_view,'waterfallplot_view':waterfallplot_view,'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion,'selected' : 'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION}
t2= time.time()
log.info('Xplain Prediction : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + str(round(t2 - t1)) + ' sec' + ' : ' + 'Success')
return render(request, 'businessview.html', context = context)
except Exception as inst:
print(inst)
log.info('Xplain Prediction : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' +'0'+ 'sec' + ' : ' + 'Error : Failed to Xplain Prediction, ' + str(inst))
context={'tab' : 'tabconfigure','error' : 'Failed to Xplain Prediction','selected' : 'businessview','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION,'selected_use_case':selected_use_case,'ModelStatus':ModelStatus,'ModelVersion':ModelVersion}
return render(request,'businessview.html',context)
from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
import json
from appbe.dataPath import DEFAULT_FILE_PATH
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appbe.dataPath import DEPLOY_LOCATION
from appbe.pages import getversion
from appbe.aion_config import running_setting
from appbe.training import checkModelUnderTraining
from appbe.training import calculate_total_activities
from appbe.training import check_unsupported_col
from appbe.training import check_granularity
from appbe.training import checkversionrunningstatus
from appbe.training import getModelStatus
from appbe.training import changeModelStatus
from appbe.training import calculate_total_interations
from appbe.pages import getusercasestatus
from utils.file_ops import read_df_compressed
import plotly.graph_objects as go
from appbe.pages import getMLModels
from appfe.modelTraining.models import usecasedetails
from appbe.training import getStatusCount
from appfe.modelTraining.models import Existusecases
import os,sys
import urllib, base64
import subprocess
import time
import re
import numpy as np
import pandas as pd
from pathlib import Path
import importlib
from appbe.log_ut import logg
from appbe import compute
import logging
AION_VERSION = getversion()
LOG_FILE_NAME = 'model_training_logs.log'
LOG_FOLDER = 'log'
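# getPerformanceMatrix: collects the performance visualisations shown after training.
# Depending on the problem type it returns base64-encoded images (classification,
# anomaly plots) or Plotly HTML fragments (outlier scatter, actual-vs-predicted lines,
# cluster plots, per-feature forecasting charts).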
def getPerformanceMatrix(deploypath,output_json):
displaypath = os.path.join(deploypath,'etc','display.json')
model_perf = []
try:
with open(displaypath) as file:
config = json.load(file)
file.close()
except Exception as e:
print(e)
import glob
resultJsonObj = json.loads(output_json)
if (resultJsonObj['data']['ModelType'] == 'anomalydetection' and resultJsonObj['data']['BestScore'] != 0) or resultJsonObj['data']['ModelType'].lower() == 'timeseriesanomalydetection': #task 11997
if resultJsonObj['data']['BestModel'].lower() == 'autoencoder' or resultJsonObj['data']['BestModel'].lower() == 'dbscan' :
try:
anomaly_plot_files = glob.glob(os.path.normpath(os.path.join(deploypath,'output','anomaly_plot','*.png')))
for plot in anomaly_plot_files:
if(os.path.isfile(plot)):
string = base64.b64encode(open(plot, "rb").read())
image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
model_perf.append(image_64)
else:
model_perf.append('nograph')
except Exception as e:
print("Anomaly plot exe error: \\n",e)
else:
predictfile = os.path.join(deploypath,'data','predicteddata.csv')
if(os.path.isfile(predictfile)):
df = pd.read_csv(predictfile)
outliers=df.loc[df['predict']==-1]
outlier_index=list(outliers.index)
normals=df.loc[df['predict']!=-1]
normals_index=list(normals.index)
featuresList = df.columns.values.tolist()
#print(featuresList)
if 'predict' in featuresList:
featuresList.remove('predict')
if 'score' in featuresList:
featuresList.remove('score')
if len(featuresList) == 1:
xdata = df[featuresList[0]]
ydata = df['score']
fig = go.Figure()
fig.add_trace(go.Scatter(x=df[featuresList[0]].iloc[normals_index], y=df['score'].iloc[normals_index],mode='markers',name='Normal'))
fig.add_trace(go.Scatter(x=df[featuresList[0]].iloc[outlier_index], y=df['score'].iloc[outlier_index],mode='markers',name='Predicted Outliers'))
fig.update_layout(xaxis_title=featuresList[0],yaxis_title="Score")
frgraph = fig.to_html(full_html=False, default_height=400, default_width=1100)
model_perf.append(frgraph)
if len(featuresList) == 2:
fig = go.Figure()
df = df.reset_index()
fig.add_trace(go.Scatter(x=df[featuresList[0]], y=df[featuresList[1]],mode='markers',name='Normal Points'))
fig.add_trace(go.Scatter(x=df[featuresList[0]].iloc[outlier_index], y=df[featuresList[1]].iloc[outlier_index],mode='markers',name='Predicted Outliers'))
fig.update_xaxes(title_text=featuresList[0])
fig.update_yaxes(title_text=featuresList[1])
fig.update_layout(xaxis_title=featuresList[0],yaxis_title=featuresList[1])
frgraph = fig.to_html(full_html=False, default_height=400, default_width=1100)
model_perf.append(frgraph)
if len(featuresList) > 2:
from sklearn.decomposition import PCA
pca = PCA(2)
pca.fit(df)
res=pd.DataFrame(pca.transform(df))
Z = np.array(res)
fig = go.Figure()
fig.add_trace(go.Scatter(x=res[0], y=res[1],mode='markers',name='Normal Points'))
fig.add_trace(go.Scatter(x=res.iloc[outlier_index,0], y=res.iloc[outlier_index,1],mode='markers',name='Predicted Outliers'))
fig.update_xaxes(title_text="Principal Component 1")
fig.update_yaxes(title_text="Principal Component 2")
frgraph = fig.to_html(full_html=False, default_height=400, default_width=1100)
model_perf.append(frgraph)
return (model_perf)
if config['problemType'].lower() == 'classification' or config['problemType'].lower() == 'anomaly_detection' or config['problemType'].lower() == 'timeseriesanomalydetection':
displaypath = os.path.join(deploypath,'log','img')
import glob
for img in glob.glob(displaypath+"/*.png"):
string = base64.b64encode(open(img, "rb").read())
image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
model_perf.append(image_64)
#print(model_perf)
elif config['problemType'].lower() == 'regression' or config['problemType'].lower() == 'recommendersystem' or \\
config['problemType'].lower() == 'text similarity':
dataPath = config['predictedData']
readstatus,predict_df=read_df_compressed(dataPath)
regfig = go.Figure()
regfig.add_trace(go.Scatter(x=np.arange(1, len(predict_df) + 1), y=predict_df['actual'],
mode='lines',
name='Actual'))
regfig.add_trace(go.Scatter(x=np.arange(1, len(predict_df) + 1), y=predict_df['predict'],
mode='lines',
name='Predict'))
frgraph = regfig.to_html(full_html=False, default_height=400, default_width=1100)
rfgraph = ''
model_perf.append(frgraph)
elif config['problemType'].lower() == 'clustering':
dataPath = config['predictedData']
readstatus,predict_df=read_df_compressed(dataPath)
distinctCount = len(predict_df['predict'].unique())
clusterlist = predict_df['predict'].unique()
color = ['green','blue','red','orange','green','blue','red','orange']
fig = go.Figure()
for cluster in clusterlist:
df_cluster = predict_df[predict_df['predict'] == cluster]
modelFeatures = config['modelFeatures']
X1= df_cluster[modelFeatures[0]].tolist()
X2= df_cluster[modelFeatures[1]].tolist()
fig.add_trace(go.Scatter(x=X1, y=X2,mode='markers',name='cluster '+str(cluster)))
fig.update_layout(title="Cluster Graph",xaxis_title=modelFeatures[0],yaxis_title=modelFeatures[1],)
frgraph = fig.to_html(full_html=False, default_height=400, default_width=1100)
model_perf.append(frgraph)
elif config['problemType'].lower() == 'timeseriesforecasting': #task 11997
dataPath = config['predictedData']
predict_df = pd.read_csv(dataPath)
modelFeatures = config['modelFeatures']
for feature in modelFeatures:
feature_name = feature + '_actual'
prediction = feature + '_pred'
if feature_name in predict_df.columns:
regfig = go.Figure()
regfig.add_trace(go.Scatter(x=np.arange(1, len(predict_df) + 1), y=predict_df[feature_name],
mode='lines',
name=feature))
regfig.add_trace(go.Scatter(x=np.arange(1, len(predict_df) + 1), y=predict_df[prediction],
mode='lines',
name='Predict'))
frgraph = regfig.to_html(full_html=False, default_height=400, default_width=1100)
model_perf.append(frgraph)
return (model_perf)
def stoptraining(request):
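    """Mark the current training run as terminated in the session and in the Existusecases record."""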
request.session['ModelStatus'] = 'Terminated'
request.session.save()
changeModelStatus(Existusecases,request.session['modelid'],'Terminated','NA','NA')
return HttpResponse('Terminated')
def kill_child_proc_rec(ppid):
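    """Recursively terminate every child process of the given parent process id."""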
import psutil
for process in psutil.process_iter():
_ppid = process.ppid()
if _ppid == ppid:
_pid = process.pid
kill_child_proc_rec(_pid)
print(f'Terminating: {_pid}')
if sys.platform == 'win32':
process.terminate()
else:
os.system('kill -9 {0}'.format(_pid))
def getDataFileCountAndSize(basicConfig):
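    """Count the files matching the configured extension under the data location (including .docx when .doc is selected) and return the count and a human-readable total size."""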
import glob
path = basicConfig['dataLocation']
radiovalue = basicConfig['folderSettings']['fileExtension']
filesCount = 0
filesSize = 0
files = []
for filename in glob.iglob(os.path.join(path, "**/*." + radiovalue), recursive=True):
files.append(filename)
if radiovalue == 'doc':
for filename in glob.iglob(os.path.join(path, "**/*." + 'docx'), recursive=True):
files.append(filename)
for filename in files:
#for filename in glob.iglob(os.path.join(path, "**/*." + radiovalue), recursive=True):
filesCount = filesCount + 1
get_size = os.path.getsize(filename)
filesSize = round(filesSize + get_size, 1)
if filesSize > 1048576:
size = round((filesSize / (1024 * 1024)), 1)
filesSize = str(size) + ' M'
elif filesSize > 1024:
size = round((filesSize /1024), 1)
filesSize = str(size) + ' K'
else:
filesSize = str(filesSize) + ' B'
return filesCount,filesSize
# task 4343 Abort training
def read_log_file( config_file):
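    """Return the contents of the training log file referenced by the given configuration file, or a failure status string when it cannot be found."""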
outputStr = 'aion_learner_status:{"status":"Fail","message":"Log file not found"}'
if Path(config_file).exists():
with open(config_file, 'r', encoding='utf-8') as f:
config = json.load(f)
deployPath = Path(config['basic']['deployLocation'])
log_file = deployPath/config['basic']['modelName'].replace(' ', '_')/config['basic']['modelVersion']/LOG_FOLDER/LOG_FILE_NAME
if log_file.exists():
with open(log_file, 'r', encoding='utf-8') as f:
outputStr = f.read()
return outputStr
def checkVectorDBPackage(embeddedDB):
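    """Return the string 'True' when VectorDB is selected but the chromadb package is not installed, otherwise 'False'."""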
errorStatus = 'False'
if embeddedDB.lower() == 'vectordb':
status = importlib.util.find_spec('chromadb')
if not status:
errorStatus = 'True'
return errorStatus
def getModelSize(configSettings,model):
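    """Return the model size selected for LLM fine-tuning of the given model, or 'NA' when no size is configured."""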
modelSize = 'NA'
if 'modelSize' in configSettings['basic']:
selectedModelSize = configSettings['basic']['modelSize']['llmFineTuning'][model]
for k in selectedModelSize.keys():
if configSettings['basic']['modelSize']['llmFineTuning'][model][k] == 'True':
modelSize = k
break
return modelSize
def llmmodelevaluate(request):
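    """Run LLM benchmarking for the fine-tuned model on its cloud instance and then re-render the training page."""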
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
usecasename = request.session['usecaseid'].replace(" ", "_")
from appbe.prediction import get_instance
hypervisor,instanceid,region,image = get_instance(usecasename+'_'+str(ModelVersion))
scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py'))
usecaseconfigfile = request.session['config_json']
f = open(usecaseconfigfile, "r+", encoding="utf-8")
configSettingsData = f.read()
f.close()
configSettings = json.loads(configSettingsData)
problem_type = ''
modelSize = ''
problemtypes = configSettings['basic']['analysisType']
for k in problemtypes.keys():
if configSettings['basic']['analysisType'][k] == 'True':
problem_type = k
break
mlmodels =''
algorihtms = configSettings['basic']['algorithms'][problem_type]
for k in algorihtms.keys():
if configSettings['basic']['algorithms'][problem_type][k] == 'True':
if mlmodels != '':
mlmodels += ', '
mlmodels += k
if 'modelSize' in configSettings['basic']:
selectedModelSize = configSettings['basic']['modelSize']['llmFineTuning'][mlmodels]
for k in selectedModelSize.keys():
if configSettings['basic']['modelSize']['llmFineTuning'][mlmodels][k] == 'True':
modelSize = k
break
eval = ''
if configSettings['basic']['folderSettings']['fileType'] == 'LLM_Document':
eval = 'doc'
elif configSettings['basic']['folderSettings']['fileType'] == 'LLM_Code':
eval = 'code'
#print(sys.executable, scriptPath,hypervisor,instanceid,f'{mlmodels}-{modelSize}',selected_use_case+'_'+str(ModelVersion),eval)
outputStr = subprocess.check_output([sys.executable, scriptPath,'-m','llmbenchmarking','-hv',hypervisor,'-i',instanceid,'-md',f'{mlmodels}-{modelSize}','-uc',usecasename+'_'+str(ModelVersion),'-e',eval])
return trainmodel(request)
def trainresult(request):
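    """Handle the 'Train Model' submission: update the configuration, launch the appropriate training pipeline, parse its output and render the training result page."""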
from appbe.aion_config import settings
usecasetab = settings()
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
usecasename = request.session['usecaseid'].replace(" ", "_")
log = logging.getLogger('log_ux')
computeinfrastructure = compute.readComputeConfig()
trainmodel =request.POST.get('trainmodel')
updatedConfigFile = request.session['config_json']
f = open(updatedConfigFile, "r+", encoding="utf-8")
configSettingsData = f.read()
configSettings = json.loads(configSettingsData)
total_steps = calculate_total_activities(configSettings)
request.session['total_steps'] = total_steps
p = usecasedetails.objects.get(usecaseid=request.session['usecaseid'])
usecaseindex = p.id #bugid:14163
if trainmodel == 'Train Model':
try:
if configSettings['basic']['analysisType']['survivalAnalysis'] != 'True' and configSettings['basic']['analysisType']['llmFineTuning'] != 'True':
configSettings['advance']['testPercentage'] = int(request.POST.get('TrainTestPercentage',0)) #Unnati
configSettings['advance']['categoryBalancingMethod'] = request.POST.get('BalancingMethod','NA')
if configSettings['basic']['analysisType']['llmFineTuning'] == 'True':
configSettings['basic']['vmRunning'] = request.POST.get('vmRunning','KeepRunning')
if configSettings['basic']['analysisType']['similarityIdentification'] == 'True':
dbs = configSettings['basic']['preprocessing']['similarityIdentification']
for dbt in dbs.keys():
configSettings['basic']['preprocessing']['similarityIdentification'][dbt]='False'
configSettings['basic']['preprocessing']['similarityIdentification'][request.POST.get('contentdb')] = 'True'
errorStatus = checkVectorDBPackage(request.POST.get('contentdb'))
if errorStatus.lower() == 'true':
return render(request, 'training.html', {'error': 'Error: Chromadb package not found.','selected_use_case': selected_use_case,'ModelVersion': ModelVersion,'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION,'contentdb':''})
if configSettings['basic']['analysisType']['contextualSearch'] == 'True':
dbs = configSettings['basic']['preprocessing']['contextualSearch']
for dbt in dbs.keys():
configSettings['basic']['preprocessing']['contextualSearch'][dbt]='False'
configSettings['basic']['preprocessing']['contextualSearch'][request.POST.get('contentdb')] = 'True'
errorStatus = checkVectorDBPackage(request.POST.get('contentdb'))
if errorStatus.lower() == 'true':
return render(request, 'training.html', {'error': 'Error: Chromadb package not found.','selected_use_case': selected_use_case,'ModelVersion': ModelVersion,'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'version':AION_VERSION,'contentdb':''})
updatedConfigSettingsJson = json.dumps(configSettings)
f.seek(0)
f.write(updatedConfigSettingsJson)
f.truncate()
f.close()
# output_json = aion_train_model(updatedConfigFile)
request.session['noflines'] = 0
request.session['ModelStatus'] = 'Running'
request.session.save()
changeModelStatus(Existusecases,request.session['modelid'],'Running','NA','NA')
#print(configSettings['basic']['distributedLearning'])
#sys.exit()
import timeit
startTime = timeit.default_timer()
process_killed = False
if computeinfrastructure['computeInfrastructure'].lower() == 'aws' and configSettings['basic']['analysisType']['llmFineTuning'] != 'True':
scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py'))
#print(scriptPath,updatedConfigFile)
outputStr = subprocess.check_output([sys.executable, scriptPath,'-m','awstraining','-c',updatedConfigFile])
elif computeinfrastructure['computeInfrastructure'].lower() in ['aws','gcp']:
scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py'))
outputStr = subprocess.check_output([sys.executable, scriptPath,'-m','llmtuning','-c',updatedConfigFile])
else:
if configSettings['basic']['analysisType']['multiLabelPrediction'] == 'True' or configSettings['basic']['analysisType']['multiModalLearning'] == 'True':
scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','bin','run_gluon.py'))
outputStr = subprocess.check_output([sys.executable, scriptPath, updatedConfigFile])
elif configSettings['basic']['onlineLearning'] == 'True':
                    scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py'))
outputStr = subprocess.check_output([sys.executable, scriptPath,'-m','onlinetraining','-c',updatedConfigFile])
elif configSettings['basic']['distributedLearning'] == 'True':
scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py'))
outputStr = subprocess.check_output([sys.executable, scriptPath,'-m','distributedtraining','-c',updatedConfigFile])
else:
scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','aion.py'))
cmd = [sys.executable, scriptPath,'-m','training','-c',updatedConfigFile] # task 4343 abort training
training_proc = subprocess.Popen( cmd)
outputStr = ''
while training_proc.poll() == None:
if getModelStatus(Existusecases,request.session['modelid']) == 'Terminated':
kill_child_proc_rec(training_proc.pid)
training_proc.kill()
process_killed = True
time.sleep(1)
if process_killed:
outputStr = 'aion_learner_status:{"status":"Fail","message":"Terminated by user"}'
else:
outputStr = read_log_file( updatedConfigFile)
usename = request.session['UseCaseName'].replace(" ", "_")
outputfile = os.path.join(DEPLOY_LOCATION,usename,str(request.session['ModelVersion']),'etc','output.json')
if os.path.isfile(outputfile):
f1 = open(outputfile, "r+", encoding="utf-8")
outputStr = f1.read()
f1.close()
else:
if not isinstance( outputStr, str):
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'aion_learner_status:(.*)', str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
resultJsonObj = json.loads(outputStr)
#print(resultJsonObj)
odataFile = request.session['outputfilepath']
with open(odataFile, 'w') as json_file:
json.dump(resultJsonObj, json_file)
json_file.close()
model = Existusecases.objects.get(id=request.session['modelid'])
request.session['ModelStatus'] = resultJsonObj['status']
ModelStatus = request.session['ModelStatus']
model.Status = resultJsonObj['status']
training_error = ''
if resultJsonObj['status'] == 'SUCCESS':
model.modelType = resultJsonObj['data']['ModelType']
model.DeployPath = str(resultJsonObj['data']['deployLocation'])
if resultJsonObj['data']['ModelType'] in ['clustering','anomalydetection', 'timeSeriesAnomalyDetection']: #task 11997
model.ProblemType = 'unsupervised'
else:
model.ProblemType = 'supervised'
else:
training_error = resultJsonObj['message']
model.save()
problemtypes = configSettings['basic']['analysisType']
#print(problemtypes.keys())
problem_typ = ""
for k in problemtypes.keys():
if configSettings['basic']['analysisType'][k] == 'True':
problem_typ = k
break
modeltyp = problem_typ
listofmodels = ''
problem_type,dproblem_type,sc,mlmodels,dlmodels,smodelsize = getMLModels(configSettings)
if mlmodels != '':
listofmodels += str(mlmodels)
if dlmodels != '':
                listofmodels += ' '+str(dlmodels)
from appbe.telemetry import UpdateTelemetry
UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'Algorithms',listofmodels)
# ----------------------------------------------------------------------------- #
if (problem_type == 'classification' or problem_type == 'regression'):
if len(mlmodels.split(',')) == 1:
trainingTime = timeit.default_timer() - startTime
trainingTime = round(trainingTime/60)
# calculate the size of uploaded dataset
filePath = configSettings['basic']['dataLocation']
sz = os.path.getsize(filePath)
fileSizeMB = sz / (1024 * 1024)
filesize = str(fileSizeMB) + " MB"
featuresCount = str(len(configSettings['basic']['trainingFeatures'].split(',')))
modelname = mlmodels.split(',')[0]
fileSizeMBLimit = 0
configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','config','training.config')
if(os.path.isfile(configfilepath)):
file = open(configfilepath, "r", encoding="utf-8")
read = file.read()
file.close()
for line in read.splitlines():
if 'fileSizeMBLimit=' in line:
fileSizeMBLimit = int(line.split('=',1)[1])
# append the new entry into config only if size of uploaded dataset meets the threshold
if fileSizeMB > fileSizeMBLimit:
_val = updateRunConfig(trainingTime, filesize, featuresCount, modelname, problem_type)
# ----------------------------------------------------------------------------- #
if resultJsonObj['status'] == 'SUCCESS':
#from appbe import telemetry
request.session['deploypath'] = str(resultJsonObj['data']['deployLocation'])
from appbe.trainresult import ParseResults
result, survical_images = ParseResults(outputStr)
model_perf = getPerformanceMatrix(request.session['deploypath'],outputStr)
#telemetry.telemetry_data('Training Successfully Done',selected_use_case+'_'+str(ModelVersion),str(listofmodels))
UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'Operation','Success')
request.session['currentstate'] = 3
request.session['finalstate'] = 4
request.session.save()
file_path = request.session['logfilepath']
my_file = open(file_path, 'r',encoding="utf8")
file_content = my_file.read()
my_file.close()
matched_lines = [line.replace('Status:-', '') for line in file_content.split('\\n') if "Status:-" in line]
matched_status_lines = matched_lines[::-1]
matched_status_lines = matched_status_lines[0]
matched_status_lines = matched_status_lines.split('...')
matched_status_lines = matched_status_lines[1]
no_lines = len(matched_lines)
if 'noflines' not in request.session:
request.session['noflines'] = 0
request.session['noflines'] = request.session['noflines'] + 1
if request.session['ModelStatus'] != 'SUCCESS':
numberoflines = request.session['noflines']
if numberoflines > no_lines:
numberoflines = no_lines
request.session['noflines'] = no_lines
matched_lines = matched_lines[0:numberoflines]
shortlogs = getStatusCount(matched_lines,request.session['total_steps'])
temp = {}
temp['modelName'] = request.session['UseCaseName']
temp['modelVersion'] = request.session['ModelVersion']
config = {}
config['modelName'] = request.session['UseCaseName']
config['modelVersion'] = request.session['ModelVersion']
config['datetimeFeatures'] = configSettings['basic']['dateTimeFeature']
config['sequenceFeatures'] = configSettings['basic']['indexFeature']
config['FeaturesList'] = configSettings['basic']['trainingFeatures']
config['unimportantFeatures'] = ''
config['targetFeature'] = configSettings['basic']['targetFeature']
modelCondition = ''
problemtypes = configSettings['basic']['analysisType']
problem_type = ""
for k in problemtypes.keys():
if configSettings['basic']['analysisType'][k] == 'True':
problem_type = k
break
problem_type,dproblemType,sc,mlmodels,dlmodels,smodelsize = getMLModels(configSettings)
configSettings['basic']['problem_type'] = problem_type
configSettings['basic']['dproblem_type'] = dproblemType
if mlmodels != '':
configSettings['basic']['mllearner'] = 'enable'
if dlmodels != '':
configSettings['basic']['dllearner'] = 'enable'
if configSettings['basic']['analysisType']['multiLabelPrediction'] == 'True':
configSettings['basic']['selected_ML_Models'] = 'AutoGluon'
configSettings['basic']['mllearner'] = 'enable'
else:
configSettings['basic']['selected_ML_Models'] = mlmodels
configSettings['basic']['selected_DL_Models'] = dlmodels
configSettings['basic']['smodel_size'] = smodelsize
if 'noOfRecords' in configSettings['basic']:
records = configSettings['basic']['noOfRecords']
else:
from appbe.train_output import getDataSetRecordsCount
records = getDataSetRecordsCount(configSettings['basic']['dataLocation'])
filesCount = 0
filesSize = 0
#print(configSettings['basic']['analysisType']['llmFineTuning'].lower())
#print(configSettings['basic']['folderSettings']['fileType'].lower())
if configSettings['basic']['analysisType']['llmFineTuning'].lower() == 'true' and configSettings['basic']['folderSettings']['fileType'].lower() in ['llm_document','llm_code']:
filesCount,filesSize = getDataFileCountAndSize(configSettings['basic'])
noofIteration = calculate_total_interations(configSettings)
features = configSettings['basic']['trainingFeatures'].split(',')
noOfTrainingFeatures = len(features)
configSettings['basic']['problem_type']=problem_type
featuretype = configSettings['advance']['profiler']['featureDict']
if ('Logistic Regression' not in mlmodels) or ('Linear Regression' not in mlmodels):
selectedmodel = 'modelcomparision'
else:
selectedmodel = " "
user_provided_data_type = {}
text_type=''
for feat_conf in featuretype:
colm = feat_conf.get('feature', '')
if feat_conf['type'] == "text":
text_type="text"
break
contentdb = ''
if problem_type.lower() in ['similarityidentification','contextualsearch']:
if configSettings['basic']['preprocessing'][problem_type]['CSV'].lower() == 'true':
contentdb = 'CSV'
elif configSettings['basic']['preprocessing'][problem_type]['VectorDB'].lower() == 'true':
contentdb = 'VectorDB'
context = {'tab': 'trainresult','filesCount':filesCount,'filesSize':filesSize, 'result': result, 'selectedmodel': selectedmodel, 'advconfig': configSettings, 'shortlogs':shortlogs,
'selected_use_case': selected_use_case, 'noOfRecords': records,'noOfTrainingFeatures':noOfTrainingFeatures,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'usecaseid':usecaseindex,#bugid:14163 #BugID13336
'noofIteration':noofIteration,'log_file':file_content,'contentdb':contentdb,
'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],
'model_perf': model_perf,'logs':matched_status_lines, 'perf_images': survical_images, 'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'usecasename':usecasename}
context['version'] = AION_VERSION
return render(request, 'training.html', context)
else:
UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'Operation','Error')
request.session['currentstate'] = 3
request.session['finalstate'] = 4
#from appbe import telemetry
if process_killed:
errorMsg = 'Terminated by user'
else:
errorMsg = 'Model Training Error (check log file for more details)'
contentdb = ''
if problem_type.lower() in ['similarityidentification','contextualsearch']:
if configSettings['basic']['preprocessing'][problem_type]['CSV'].lower() == 'true':
contentdb = 'CSV'
elif configSettings['basic']['preprocessing'][problem_type]['VectorDB'].lower() == 'true':
contentdb = 'VectorDB'
#telemetry.telemetry_data('Training Error',selected_use_case+'_'+str(ModelVersion),str(listofmodels))
context = {'tab': 'trainresult', 'error': errorMsg,'selected_use_case': selected_use_case,'contentdb':contentdb,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'usecaseid':usecaseindex,#bugid:14163 #BugID13336
'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],
'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'usecasename':usecasename}
context['version'] = AION_VERSION
return render(request, 'training.html', context)
except Exception as e:
log.info('Training Fail:' + str(selected_use_case) + ':' + str(ModelVersion) + ':' + '0' + 'sec' + ':' + 'Training fail '+str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
print(e)
return render(request, 'training.html', {'error': 'Model Training Error','selected_use_case': selected_use_case,'ModelVersion': ModelVersion,'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasename':usecasename,'usecasetab':usecasetab,'version':AION_VERSION,'contentdb':''})
else:
modelCondition = ''
problemtypes = configSettings['basic']['analysisType']
problem_type = ""
for k in problemtypes.keys():
if configSettings['basic']['analysisType'][k] == 'True':
problem_type = k
break
problem_type,dproblem_type,sc,mlmodels,dlmodels,smodelsize = getMLModels(configSettings)
configSettings['basic']['problem_type'] = problem_type
        configSettings['basic']['dproblem_type'] = dproblem_type
if mlmodels != '':
configSettings['basic']['mllearner'] = 'enable'
if dlmodels != '':
configSettings['basic']['dllearner'] = 'enable'
if configSettings['basic']['analysisType']['multiLabelPrediction'] == 'True':
configSettings['basic']['selected_ML_Models'] = 'AutoGluon'
configSettings['basic']['mllearner'] = 'enable'
else:
configSettings['basic']['selected_ML_Models'] = mlmodels
configSettings['basic']['selected_DL_Models'] = dlmodels
if 'noofRecords' in configSettings['basic']:
records = configSettings['basic']['noofRecords']
else:
from appbe.train_output import getDataSetRecordsCount
records = getDataSetRecordsCount(configSettings['basic']['dataLocation'])
filesCount = 0
filesSize = 0
print(configSettings['basic']['analysisType']['llmFineTuning'].lower())
print(configSettings['basic']['folderSettings']['fileType'].lower())
if configSettings['basic']['analysisType']['llmFineTuning'].lower() == 'true' and \\
configSettings['basic']['folderSettings']['fileType'].lower() in ['llm_document', 'llm_code']:
filesCount, filesSize = getDataFileCountAndSize(configSettings['basic'])
noofIteration = calculate_total_interations(configSettings)
features = configSettings['basic']['trainingFeatures'].split(',')
noOfTrainingFeatures = len(features)
configSettings['basic']['problem_type']=problem_type
context = { 'advconfig': configSettings,'filesCount':filesCount,'filesSize':filesSize,
'selected_use_case': selected_use_case, 'noOfRecords': records, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion,'noofIteration':noofIteration,'usecasename':usecasename,
'modelCondition':modelCondition, 'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'noOfTrainingFeatures':noOfTrainingFeatures}
context['version'] = AION_VERSION
return render(request, 'training.html',context)
def getTrainingTime(filePath, no_of_features):
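    """Estimate the training time (in minutes) by matching the dataset size and feature count against recorded runs in training_runs.json, extrapolating from the closest run when none falls within the accepted variance; returns '<minutes>_match', '<minutes>_extrapolation' or '0_0'."""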
#filePath = 'C:\\\\MyFolder\\AION\\\\AION Datasets\\\\Heavy Datasets\\\\class_1MRows_26Cols.csv'
returnVal = '0_0'
if(os.path.isfile(filePath)):
trainingTime = 0
neartrainingTime = 0 # It's used to store the closest Training-Time
nearsampleSize = 0 # It's used to store the closest Sample-Size
leastSizeDifference = 0 # It's used to find the possible minimum difference between the dataset's actual size and Sample-Size in JSON file
inRange = 0 # It's used to identify if Extrapolation is needed or not
fileSizeMBLimit = 0 # It's used to check/limit the size of uploaded dataset
acceptSizeVariance = 10 # It's used to cover the variance in sample-size
        featuresThreshold = 50      # feature-count threshold used to split the recorded runs into two groups
# ------------------------------------------------------------------------------------------------------------ #
configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','config','training.config')
if(os.path.isfile(configfilepath)):
file = open(configfilepath, "r")
read = file.read()
file.close()
for line in read.splitlines():
if 'fileSizeMBLimit=' in line:
fileSizeMBLimit = int(line.split('=',1)[1])
if 'acceptSizeVariance=' in line:
acceptSizeVariance = int(line.split('=',1)[1])
if 'featuresThreshold=' in line:
featuresThreshold = int(line.split('=',1)[1])
# get the size of uploaded dataset/file (in MB)
sz = os.path.getsize(filePath)
fileSizeMB = sz / (1024 * 1024)
        # estimate the training time only when the uploaded dataset is larger than the configured size threshold
if(fileSizeMB > fileSizeMBLimit):
configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','config','training_runs.json')
try:
if(os.path.isfile(configfilepath)):
# Opening JSON file
f = open(configfilepath)
# returns JSON object as a dictionary
data = json.load(f)
# Iterating through the json list
for run in data['runs']:
sampleSize = run['sampleSize'].replace(" MB","")
sampleSize = int(float(sampleSize))
features = int(run['features'])
# match records under 10% (+ or -) of variance
sizeDifference = fileSizeMB - sampleSize
if (sizeDifference < 0):
sizeDifference = sizeDifference * -1
if (leastSizeDifference == 0):
leastSizeDifference = sizeDifference
# ------------------------------------------------------------------------------------------------ #
if (no_of_features <= featuresThreshold):
if ((sizeDifference * 100)/fileSizeMB < acceptSizeVariance and features <= featuresThreshold):
acceptSizeVariance = (sizeDifference * 100)/fileSizeMB
trainingTime = run['trainingTime'].replace(" Mins","")
trainingTime = int(trainingTime)
returnVal = str(trainingTime) + '_match'
inRange = 1
# get the nearest value of sampleSize (which can be used for extrapolation) from the JSON file
if (sizeDifference <= leastSizeDifference and features <= featuresThreshold):
nearsampleSize = sampleSize
leastSizeDifference = sizeDifference
neartrainingTime = run['trainingTime'].replace(" Mins","")
neartrainingTime = int(neartrainingTime)
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
if (no_of_features > featuresThreshold):
if ((sizeDifference * 100)/fileSizeMB < acceptSizeVariance and features > featuresThreshold):
acceptSizeVariance = (sizeDifference * 100)/fileSizeMB
trainingTime = run['trainingTime'].replace(" Mins","")
trainingTime = int(trainingTime)
returnVal = str(trainingTime) + '_match'
inRange = 1
# get the nearest value of sampleSize (which can be used for extrapolation) from the JSON file
if (sizeDifference <= leastSizeDifference and features > featuresThreshold):
nearsampleSize = sampleSize
leastSizeDifference = sizeDifference
neartrainingTime = run['trainingTime'].replace(" Mins","")
neartrainingTime = int(neartrainingTime)
# ------------------------------------------------------------------------------------------------ #
# When there is no record (sample-size) matched with 10% of variance then go for the extrapolation
if (inRange == 0):
sizeDifference = fileSizeMB - nearsampleSize
ratio = (sizeDifference * 100)/nearsampleSize
trainingTime = neartrainingTime + ((ratio * neartrainingTime)/100)
trainingTime = int(trainingTime)
returnVal = str(trainingTime) + '_extrapolation'
# Closing file
f.close()
except Exception as inst:
pass
return returnVal
def getllmmodelscore(usecaseid,model):
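    """Build the LLM benchmark summary (dataset, score type, fine-tuned and foundation scores) for a use case from the llm_benchmarking table."""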
DB_TABLE = 'llm_benchmarking'
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
file_path = str(Path(DATA_DIR)/'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
usecaseidcond = f'usecaseid="{usecaseid}"'
helptxt =''
msg = ''
#print(usecaseidcond)
if sqlite_obj.table_exists(DB_TABLE):
usecasemodelscore = sqlite_obj.read_data(DB_TABLE,usecaseidcond)
status = ''
finetunedscore = 'NA'
foundationscore = 'NA'
benchmarkdataset = 'NA'
modelfunctionscore = {'CodeLLaMA-2-7B':'33%','CodeLLaMA-2-13B':'36%','LLaMA-2-7B':'16.8%','LLaMA-2-13B':'20.1%','LLaMA-2-70B':'31.0%','LLaMA-2-Chat-7B':'76%','LLaMA-2-Chat-13B':'79.2%','LLaMA-2-Chat-70B':'84.2%','Falcon-7B':'NA','Falcon-40B':'NA'}
foundationscore = modelfunctionscore.get(model,'NA')
scoretype='NA'
for x in usecasemodelscore:
#print(x)
keys = sqlite_obj.column_names(DB_TABLE)
#print(keys)
status = x[keys.index('state')]
if status.lower() in ['success','finished']:
result_type = x[keys.index('result_type')]
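                # the stored result is a stringified dict/number; eval() converts it back into a Python object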
result = eval(x[keys.index('result')])
scoretype = list(result.keys())[0]
if scoretype.lower() == 'hellaswag':
benchmarkdataset = 'HellaSwag'
helptxt = 'HellaSwag is a challenge dataset for evaluating commonsense Natural Language Inferencing. It consists of ~70k multiple choice questions with four answer choices about what might happen next. The correct answer is the (real) sentence for the next event; the three incorrect answers are adversarial generated and human verified.'
else:
benchmarkdataset = 'HumanEval'
if result_type == 'dict':
sub_result = list(result.values())[0]
scoretype = list(sub_result.keys())[0]
if scoretype == 'acc':
scoretype = 'Accuracy'
finetunedscore = str(round((float(list(sub_result.values())[0])*100),2))
finetunedscore = f'{finetunedscore}%'
else:
finetunedscore = str(round((float(list(result.values())[0])*100),2))
elif status.lower() == 'error':
msg = x[keys.index('result')]
evaluation = {'status':status,'msg':msg,'benchmarkdataset':benchmarkdataset,'scoreType':scoretype,'finetunedscore':str(finetunedscore),'foundationscore':foundationscore,'helptxt':helptxt}
else:
evaluation = {'status':'','scoreType':'','benchmarkdataset':'','finetunedscore':'','foundationscore':'','helptxt':''}
#print(evaluation)
return evaluation
def trainmodel(request):
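    """Render the training page with either the parsed results of a completed run or the pre-training summary (estimated training time, record count and warnings)."""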
from appbe.aion_config import settings
usecasetab = settings()
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
usecasename = request.session['usecaseid'].replace(" ", "_")
try:
checkModelUnderTraining(request,usecasedetails,Existusecases)
computeinfrastructure = compute.readComputeConfig()
updatedConfigFile = request.session['config_json']
f = open(updatedConfigFile, "r+", encoding="utf-8")
configSettingsData = f.read()
configSettingsJson = json.loads(configSettingsData)
total_steps = calculate_total_activities(configSettingsJson)
warning = check_unsupported_col(configSettingsJson)
time_series_warning = check_granularity(configSettingsJson)
noofIteration = calculate_total_interations(configSettingsJson)
request.session['total_steps'] = total_steps
p = usecasedetails.objects.get(usecaseid=request.session['usecaseid'])
usecaseid = p.id
modelCondition = ''
problem_type,dproblem_type,sc,mlmodels,dlmodels,smodelsize = getMLModels(configSettingsJson)
configSettingsJson['basic']['problem_type'] = problem_type
configSettingsJson['basic']['dproblem_type'] = dproblem_type
if mlmodels != '':
configSettingsJson['basic']['mllearner'] = 'enable'
if dlmodels != '':
configSettingsJson['basic']['dllearner'] = 'enable'
if configSettingsJson['basic']['analysisType']['multiLabelPrediction'] == 'True' or configSettingsJson['basic']['analysisType']['multiModalLearning'] == 'True':
configSettingsJson['basic']['selected_ML_Models'] = 'AutoGluon'
configSettingsJson['basic']['mllearner'] = 'enable'
else:
configSettingsJson['basic']['selected_ML_Models'] = mlmodels
configSettingsJson['basic']['selected_DL_Models'] = dlmodels
configSettingsJson['basic']['smodel_size'] = smodelsize
# ---------------------------------------------------------------------- #
cal_trainingTime = 0.
is_extrapolation = 'No'
is_DataImbalance = 'No'
if (request.session['ModelStatus'] == 'Not Trained' and (problem_type == 'classification' or problem_type == 'regression')):
# <!-- ------------------------------ Data Imbalance Changes ------------------------------ -->
if ( problem_type == 'classification' ):
is_DataImbalance = 'Yes'
# <!-- ------------------------------------------------------------------------------------- -->
if len(mlmodels.split(',')) == 1:
filePath = configSettingsJson['basic']['dataLocation']
no_of_features = len(configSettingsJson['basic']['trainingFeatures'].split(','))
returnVal = getTrainingTime(filePath, no_of_features)
cal_trainingTime = int(returnVal.split('_')[0])
if (returnVal.split('_')[1] == 'extrapolation'):
is_extrapolation = 'Yes'
# ---------------------------------------------------------------------- #
features = configSettingsJson['basic']['trainingFeatures'].split(',')
if configSettingsJson['basic']['targetFeature'] in features:
features.remove(configSettingsJson['basic']['targetFeature'])
noOfTrainingFeatures = len(features)
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
if 'noOfRecords' in configSettingsJson['basic']:
records = configSettingsJson['basic']['noOfRecords']
else:
from appbe.train_output import getDataSetRecordsCount
records = getDataSetRecordsCount(configSettingsJson['basic']['dataLocation'])
filesCount = 0
filesSize = 0
try:
if configSettingsJson['basic']['analysisType']['llmFineTuning'].lower() == 'true' and \\
                configSettingsJson['basic']['folderSettings']['fileType'].lower() in ['llm_document', 'llm_code']:
filesCount, filesSize = getDataFileCountAndSize(configSettingsJson['basic'])
except:
pass
if request.session['finalstate'] <= 3:
request.session['finalstate'] = 3
request.session['currentstate'] = 3
if request.session['ModelStatus'].lower() == 'running':
model = Existusecases.objects.get(ModelName=request.session['ModelName'],
Version=request.session['ModelVersion'])
status = checkversionrunningstatus(model.id,usecasedetails,Existusecases)
request.session['ModelStatus'] = status
request.session.save()
if request.session['ModelStatus'] == 'SUCCESS':
model = Existusecases.objects.get(ModelName=request.session['ModelName'],
Version=request.session['ModelVersion'])
output_train_json_filename = str(model.TrainOuputLocation)
f = open(output_train_json_filename, "r+")
training_output = f.read()
f.close()
model_perf = getPerformanceMatrix(request.session['deploypath'],training_output)
from appbe.trainresult import ParseResults
result, survical_images = ParseResults(training_output)
file_path = request.session['logfilepath']
my_file = open(file_path, 'r',encoding="utf-8")
file_content = my_file.read()
my_file.close()
matched_lines = [line.replace('Status:-', '') for line in file_content.split('\\n') if "Status:-" in line]
matched_status_lines = matched_lines[::-1]
matched_status_lines = matched_status_lines[0]
matched_status_lines = matched_status_lines.split('...')
matched_status_lines = matched_status_lines[1]
no_lines = len(matched_lines)
if 'noflines' not in request.session:
request.session['noflines'] = 0
request.session['noflines'] = request.session['noflines'] + 1
if request.session['ModelStatus'] != 'SUCCESS':
numberoflines = request.session['noflines']
if numberoflines > no_lines:
numberoflines = no_lines
request.session['noflines'] = no_lines
matched_lines = matched_lines[0:numberoflines]
shortlogs = getStatusCount(matched_lines,request.session['total_steps'])
featuretype = configSettingsJson['advance']['profiler']['featureDict']
user_provided_data_type = {}
text_type=''
for feat_conf in featuretype:
colm = feat_conf.get('feature', '')
if feat_conf['type'] == "text":
text_type="text"
break
configSettingsJson['basic']['problem_type']= problem_type
configSettingsJson['basic']['selected_ML_Models']= mlmodels
if ('Logistic Regression' not in mlmodels) or ('Linear Regression' not in mlmodels):
selectedmodel = 'modelcomparision'
else:
selectedmodel = " "
contentdb = ''
finetunedeval = {}
if problem_type.lower() in ['similarityidentification','contextualsearch']:
if configSettingsJson['basic']['preprocessing'][problem_type]['CSV'].lower() == 'true':
contentdb = 'CSV'
elif configSettingsJson['basic']['preprocessing'][problem_type]['VectorDB'].lower() == 'true':
contentdb = 'VectorDB'
if problem_type.lower() == 'llmfinetuning':
modelSize = getModelSize(configSettingsJson,mlmodels)
usecasename = request.session['usecaseid'].replace(" ", "_")
finetunedeval = getllmmodelscore(f'{usecasename}_{ModelVersion}',f'{mlmodels}-{modelSize}')
context = {'result': result,'filesCount':filesCount,'filesSize':filesSize, 'text_type':text_type,'selectedmodel':selectedmodel, 'advconfig': configSettingsJson,'usecaseid':usecaseid,'usecasename':usecasename,
'selected_use_case': selected_use_case, 'noOfRecords': records, 'ModelStatus': ModelStatus,'warning':warning,'time_series_warning':time_series_warning,
'modelCondition':modelCondition,'ModelVersion': ModelVersion,'shortlogs':shortlogs,'logs':matched_status_lines,'currentstate': request.session['currentstate'],'finalstate': request.session['finalstate'], 'model_perf': model_perf,'perf_images': survical_images, 'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'noOfTrainingFeatures':noOfTrainingFeatures,'version':AION_VERSION,'noofIteration':noofIteration,'log_file':file_content,'contentdb':contentdb,'finetunedeval':finetunedeval}
else:
contentdb = ''
if problem_type.lower() in ['similarityidentification','contextualsearch']:
if configSettingsJson['basic']['preprocessing'][problem_type]['CSV'].lower() == 'true':
contentdb = 'CSV'
elif configSettingsJson['basic']['preprocessing'][problem_type]['VectorDB'].lower() == 'true':
status = importlib.util.find_spec('chromadb')
if not status:
contentdb = 'CSV'
else:
contentdb = 'VectorDB'
else:
status = importlib.util.find_spec('chromadb')
if not status:
contentdb = 'CSV'
else:
contentdb = 'VectorDB'
configSettingsJson['basic']['problem_type']=problem_type
context = {'cal_trainingTime':cal_trainingTime,'filesCount':filesCount,'filesSize':filesSize, 'is_extrapolation': is_extrapolation,'advconfig': configSettingsJson,'usecaseid':usecaseid,'usecasename':usecasename,
'selected_use_case': selected_use_case, 'noOfRecords': records, 'ModelStatus': ModelStatus, 'warning':warning,'time_series_warning':time_series_warning,'is_DataImbalance' : is_DataImbalance,
'ModelVersion': ModelVersion, 'currentstate': request.session['currentstate'],
'modelCondition':modelCondition,'finalstate': request.session['finalstate'], 'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab,'noOfTrainingFeatures':noOfTrainingFeatures,'version':AION_VERSION,'noofIteration':noofIteration,'contentdb':contentdb}
return render(request, 'training.html', context)
except Exception as e:
print(e)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
context = { 'error': 'Model Training Error','selected_use_case': selected_use_case,'contentdb':'','usecasename':usecasename,
'ModelVersion': ModelVersion,'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,'version':AION_VERSION}
        return render(request, 'training.html', context)
from django.test import TestCase
# Create your tests here.
from django import forms
from appfe.modelTraining.models import usecasedetails
from appfe.modelTraining.models import Existusecases
class usecasedetailsForm(forms.ModelForm):
class Meta:
model = usecasedetails
fields = "__all__"
class ExistusecasesForm(forms.ModelForm):
class Meta:
model = Existusecases
fields = "__all__" <s> from django.contrib import admin
# Register your models here.
from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
from appbe.dataPath import DEFAULT_FILE_PATH
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appbe.dataPath import DEPLOY_LOCATION
from appbe.pages import getversion
from appbe.aion_config import running_setting
from appfe.modelTraining.models import usecasedetails
from appfe.modelTraining.models import Existusecases
from appbe import compute
AION_VERSION = getversion()
def basicconfig(request):
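    """Render the basic configuration page, building a new configuration on a fresh submission or reopening the saved one."""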
try:
from appbe.aion_config import settings
usecasetab = settings()
from appbe import basic_Config as bc
request.session['defaultfilepath'] = DEFAULT_FILE_PATH
request.session['configfilepath'] = CONFIG_FILE_PATH
request.session['deploylocation'] = DEPLOY_LOCATION
computeinfrastructure = compute.readComputeConfig()
submit_button = request.POST.get('upload_submit')
ModelVersion = request.session['ModelVersion']
ruuningSetting = running_setting()
selected_use_case = request.session['UseCaseName']
ModelStatus = request.session['ModelStatus']
#print(request.method)
# Retraing settings changes
if request.method == 'POST' and request.session['finalstate'] == 0:
context = bc.gotoconf(request)
else:
context = bc.openbasicconf(request)
context['computeinfrastructure'] = computeinfrastructure
context['version'] = AION_VERSION
context['usecasetab'] = usecasetab
return render(request, 'basicconfig.html', context)
except Exception as e:
print(e)
import sys,os
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
        return render(request, 'basicconfig.html', {'error':'Failed to retrieve basic config file inputs','version':AION_VERSION,'usecasetab':usecasetab})
def savebasicconfig(request):
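    """Validate and save the basic configuration, then route to the advanced configuration page or directly to model training."""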
from appbe import basic_Config as bc
from appbe import advance_Config as ac
from appfe.modelTraining.train_views import trainmodel
try:
if request.method != 'GET':
status,msg,context =bc.save(request)
else:
status = 'pass'
msg = ''
except Exception as e:
print(e)
if status.lower()!='error':
if request.method == 'GET':
context = ac.basicconfignex(request)
computeinfrastructure = compute.readComputeConfig()
context['computeinfrastructure'] = computeinfrastructure
context['version'] = AION_VERSION
context['currentstate'] = 1
return render(request, 'advancedconfig.html', context)
elif request.POST.get('BasicSubmit') == 'GotoAdvance':
context = ac.basicconfignex(request)
computeinfrastructure = compute.readComputeConfig()
context['computeinfrastructure'] = computeinfrastructure
context['version'] = AION_VERSION
context['currentstate'] = 1
return render(request, 'advancedconfig.html', context)
else:
return trainmodel(request)
else:
context = bc.openbasicconf(request)
computeinfrastructure = compute.readComputeConfig()
context['computeinfrastructure'] = computeinfrastructure
context['config_error']= msg
context['version'] = AION_VERSION
return render(request, 'basicconfig.html', context)
<s> from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
import time
from django.template import loader
from django import template
from django.views.decorators.csrf import csrf_exempt
from os import walk
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from appbe import help_Text as ht
import random
from appbe import service_url
from appbe import compute
from appbe import installPackage
from appbe.pages import getusercasestatus
from appbe import images_analysis as ia
from django.db.models import Max, F
from appbe.aion_config import settings
from appbe.aion_config import get_graviton_data
from appbe.aion_config import get_llm_data
from appbe.aion_config import get_edafeatures
from appbe.training import calculate_total_activities
from appbe.training import calculate_total_interations
from appbe.training import checkModelUnderTraining
from appbe.training import checkversionrunningstatus
from appbe.training import changeModelStatus
from appbe.training import getStatusCount
from appbe.training import getModelStatus
from appbe.training import check_unsupported_col
from appbe.publish import chech_publish_info
from appbe.publish import check_input_data
import uuid
import numpy as np
from appbe.aion_config import kafka_setting
from appbe.aion_config import running_setting
from appbe.validatecsv import csv_validator
from appbe.aion_config import addKafkaModel
from appbe.aion_config import getrunningstatus
from appbe.aion_config import aion_service
from appbe.pages import getversion
from appbe.s3bucketsDB import get_s3_bucket
from appbe.s3bucketsDB import read_s3_bucket
from appbe.gcsbucketsDB import get_gcs_bucket
from appbe.gcsbucketsDB import read_gcs_bucket
from appbe.azureStorageDB import get_azureStorage
from appbe.azureStorageDB import read_azureStorage
from appbe.dataIngestion import getcommonfields
from appbe.dataIngestion import ingestDataFromFile
from appbe.dataIngestion import delimitedsetting
import pdfplumber
from docx import Document
from appbe.trainresult import ParseResults
import pandas as pd
import numpy as np
import re
import xml.etree.ElementTree as ET
import json
import glob
from appbe import dataPath
from pathlib import Path
import urllib, base64
import os
from os.path import expanduser
import platform
import time
import sys
import csv
import subprocess
import base64
from appfe.modelTraining.models import usecasedetails
from appfe.modelTraining.forms import usecasedetailsForm
from appfe.modelTraining.models import Existusecases
from django.shortcuts import get_list_or_404, get_object_or_404
from pandas import json_normalize
from django.contrib.sessions.models import Session
import logging
from appbe.dataPath import DEFAULT_FILE_PATH
from appbe.dataPath import DATA_FILE_PATH
from appbe.dataPath import CONFIG_FILE_PATH
from appbe.dataPath import DEPLOY_LOCATION
from utils.file_ops import read_df_compressed
from appbe.dataPath import LOG_LOCATION
from appbe.log_ut import logg
LOG_FILE_NAME = 'model_training_logs.log'
LOG_FOLDER = 'log'
if os.path.isdir(DATA_FILE_PATH) == False:
os.makedirs(DATA_FILE_PATH)
if os.path.isdir(CONFIG_FILE_PATH) == False:
os.makedirs(CONFIG_FILE_PATH)
if os.path.isdir(DEPLOY_LOCATION) == False:
    os.makedirs(DEPLOY_LOCATION)
# EION_SCRIPT_PATH = 'C:\\\\Project\\\\Analytics\\\\eion\\\\eion\\\\eion.py'
PYTHON_PATH = 'python.exe'
AION_VERSION = getversion()
usecasetab = settings()
#AION_VERSION
# MainPage
logg_obj = logg(LOG_LOCATION)
log = logg_obj.create_log(AION_VERSION)
def index(request):
from appbe.pages import index_page
status,context,action = index_page(request,usecasedetails,Existusecases)
context['version'] = AION_VERSION
return render(request,action,context)
def localsetings(request):
from appbe.pages import get_usecase_page
try:
compute.updatelocalsetings(request)
time.sleep(2)
request.session['IsRetraining'] = 'No'
#print(1)
status,context,action = get_usecase_page(request,usecasedetails,Existusecases)
context['version'] = AION_VERSION
#print(2)
return render(request,action,context)
except Exception as e:
print(e)
        return render(request, 'usecases.html',{'error': 'Failed to update local settings','version':AION_VERSION})
def computetoAWS(request):
from appbe.pages import get_usecase_page
try:
compute.updateToComputeSettings(request)
time.sleep(2)
#print(1)
request.session['IsRetraining'] = 'No'
status,context,action = get_usecase_page(request,usecasedetails,Existusecases)
context['version'] = AION_VERSION
return render(request,action,context)
except Exception as e:
print(e)
        return render(request, 'usecases.html',{'error': 'Failed to update compute settings','version':AION_VERSION})
def licensekey(request):
try:
command = request.POST['licensesubmit']
if command.lower() == 'generatelicense':
userkey = request.POST['userkey']
from records import pushrecords
msg = pushrecords.generateLicenseKey(userkey)
context = {'msg':msg}
context['selected'] = 'License'
print(context)
return render(request,'licenseexpired.html',context)
else:
licensekey = request.POST['licensekey']
from records import pushrecords
pushrecords.updateLicense(licensekey)
from appbe.pages import get_usecase_page
status,context,action = get_usecase_page(request,usecasedetails,Existusecases)
context['version'] = AION_VERSION
return render(request,action,context)
except Exception as e:
print(e)
        return render(request, 'usecases.html',{'error': 'Failed to load the page','version':AION_VERSION})
def help(request):
context = {'selected': 'userguide', 'usecasetab': usecasetab}
context['version'] = AION_VERSION
return render(request, 'help.html', context)
def mlac_userguide(request):
context = {'selected': 'mlac_userguide', 'usecasetab': usecasetab}
context['version'] = AION_VERSION
return render(request, 'help.html', context)
def AionProblem(request):
if request.method == "POST":
AionProblem = request.POST["Algorithm"]
request.session["AionProblem"] = AionProblem
return HttpResponse(AionProblem)
def features(request):
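    """Return the dataset's column names as JSON, read either from a CSV file path or from a DataFrame named dfpy produced by executing a user-supplied script."""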
if request.method == "POST":
typedata = request.POST['datatype']
if typedata == "datapath":
datapath = request.POST['datap']
            if os.path.isfile(datapath):
df = pd.read_csv(datapath)
modelfeature = df.columns.tolist()
modelfeatures = json.dumps(modelfeature)
return HttpResponse(modelfeatures)
else:
return HttpResponse(json.dumps("Data path does not exist "), content_type="application/error")
elif typedata == "scriptpath":
scriptPath = request.POST['scriptp']
#print(scriptPath)
f = open(scriptPath, "r")
pythoncode = f.read()
f.close()
ldict = {}
exec(pythoncode, globals(), ldict)
df = ldict['dfpy']
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
df.to_csv(dataFile, index=False)
modelfeature = df.columns.tolist()
output = {'features':modelfeature,'datafile':dataFile}
output = json.dumps(output)
# return render(request,'prediction.html',{'modelfeatures':modelfeatures,'test':'test'})
return HttpResponse(output)
def mllite(request):
from appbe.pages import mllite_page
context = mllite_page(request)
context['version'] = AION_VERSION
return render(request, 'ConvertOnnx.html',context)
def usecasefilter(request):
from appbe import mlstyles as mls
selectedoption = request.GET["selectedoption"]
context = mls.Aiusecases(request,selectedoption)
context['listtype'] = selectedoption
context['version'] = AION_VERSION
return render(request, 'aiUseCases.html',context)
def AIusecases(request):
from appbe import mlstyles as mls
context = mls.Aiusecases(request,'Implemented')
context['listtype'] = 'Implemented'
context['version'] = AION_VERSION
return render(request, 'aiUseCases.html',context)
def mlstyles(request):
from appbe import mlstyles as mls
context = mls.mlstyles(request)
context['selected'] = 'DataOperations'
context['version'] = AION_VERSION
return render(request, 'mlstyles.html',context)
def mlpredict(request):
from appbe import mlstyles as mls
context, button_flag = mls.mlpredict(request)
context['selected'] = 'DataOperations'
context['version'] = AION_VERSION
if button_flag in ['prediction','predictsingle']:
return render(request, 'mlstyles.html', context)
else:
return context
def mltrain(request):
from appbe import mlstyles as mls
context, button_flag = mls.mltrain(request)
context['selected'] = 'DataOperations'
context['version'] = AION_VERSION
if button_flag == 'training':
return render(request, 'mlstyles.html', context)
else:
return context
def getdatasetname(request):
try:
from appbe.dataPath import DATA_DIR
from appbe.sqliteUtility import sqlite_db
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
temp_data = sqlite_obj.read_data('dataingest')
data = []
for x in temp_data:
data_dict = {}
data_dict['datasetname'] = x[1]
data.append(data_dict)
except Exception as e:
print(e)
data = []
return HttpResponse(json.dumps(data))
def outputdrift(request):
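    """Render the output drift monitoring page for a successfully trained classification or regression model."""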
try:
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
computeinfrastructure = compute.readComputeConfig()
if ModelStatus != 'SUCCESS':
context = {'error': 'Please train the model first or launch an existing trained model', 'selected_use_case': selected_use_case,'usecasetab':usecasetab,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'monitoring','computeinfrastructure':computeinfrastructure}
else:
updatedConfigFile = request.session['config_json']
f = open(updatedConfigFile, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
problemtypes = configSettingsJson['basic']['analysisType']
problem_type = ""
for k in problemtypes.keys():
if configSettingsJson['basic']['analysisType'][k] == 'True':
problem_type = k
break
problem = problem_type
ser_url = service_url.read_performance_service_url_params()
iterName = request.session['UseCaseName'].replace(" ", "_")
ModelVersion = request.session['ModelVersion']
ser_url = ser_url+'performance?usecaseid='+iterName+'&version='+str(ModelVersion)
if problem.lower() not in ['classification','regression']:
                context = {'error': 'Output drift is only available for classification and regression problem types', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,
'ModelVersion': ModelVersion, 'selected': 'monitoring','computeinfrastructure':computeinfrastructure,'usecasetab':usecasetab}
else:
context = {'SUCCESS': 'Model is trained', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'usecasetab':usecasetab,
'ModelVersion': ModelVersion, 'selected': 'monitoring','computeinfrastructure':computeinfrastructure,'ser_url':ser_url,'trainingDataLocation':request.session['datalocation']}
return render(request, 'outputdrif.html', context)
except:
        return render(request, 'outputdrif.html', {'error':'Failed to run output drift analysis','usecasetab':usecasetab})
# -------------------------------- Graviton-Integration Changes S T A R T --------------------------------
def getuserdata(request):
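    """Fetch the data services available to the configured Graviton user and return them as JSON."""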
import requests
data = []
try:
graviton_url,graviton_userid = get_graviton_data()
gravitonURL = graviton_url
gravitonUserId = graviton_userid
# url = 'https://xenius.azurewebsites.net/api/dataservices?userid='+
url = gravitonURL + 'dataservices?userid=' + gravitonUserId
print(url)
response = requests.get(url)
statuscode = response.status_code
print(statuscode)
if statuscode == 200:
json_dictionary = json.loads(response.content)
data = json_dictionary['result']
print(data)
except Exception as e:
print(e)
data = []
data_json = json.dumps(data)
return HttpResponse(data_json)
def getdataservice(request):
import requests
data = []
dataServiceId = request.GET.get('DataServiceId')
try:
graviton_url,graviton_userid = get_graviton_data()
gravitonURL = graviton_url
gravitonUserId = graviton_userid
# url = 'https://xenius.azurewebsites.net/api/getmetadata?userid=1&dataserviceid='+str(dataServiceId)
url = gravitonURL + 'getmetadata?userid=' + gravitonUserId +'&dataserviceid='+str(dataServiceId)
response = requests.get(url)
statuscode = response.status_code
if statuscode == 200:
json_dictionary = json.loads(response.content)
data = json_dictionary['result']
except Exception as e:
print(e)
data = []
data_json = json.dumps(data)
return HttpResponse(data_json)
# ------------------------------------------------ E N D -------------------------------------------------
def getvalidateddata(request):
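    """Fetch validated data for the given Elixir task id, save it as a CSV data file and render the upload page with a preview."""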
import requests
computeinfrastructure = compute.readComputeConfig()
taskid = request.POST.get('elixirdatataskid')
try:
url = 'http://'+elixir_ip+':'+elixir_port+'/api/get_validation_result?task_id='+str(taskid)
#print(url)
response = requests.get(url)
statuscode = response.status_code
if statuscode == 200:
json_dictionary = json.loads(response.content)
data = json_dictionary['Result']
else:
data = []
except Exception as e:
print(e)
data = []
try:
df = pd.DataFrame.from_dict(data)
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
request.session['datalocation'] = str(dataFile)
df.to_csv(dataFile, index=False)
df_top = df.head(10)
df_json = df_top.to_json(orient="records")
df_json = json.loads(df_json)
statusmsg = 'Data File Uploaded Successfully '
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
request.session['currentstate'] = 0
request.session['finalstate'] = 0
request.session['datatype'] = 'Normal'
context = {'tab': 'tabconfigure','data': df_json,'status_msg': statusmsg,'selected_use_case': selected_use_case,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning',
'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':False,'computeinfrastructure':computeinfrastructure}
return render(request, 'upload.html', context)
except:
context = {'tab': 'tabconfigure','selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning','computeinfrastructure':computeinfrastructure,"usecaseerror":"Error in validating data!"}
return render(request, 'upload.html', context)
def trigger_DAG(request):
from appfe.modelTraining import AirflowLib
response = AirflowLib.TriggerDag("example_complex", "")
return HttpResponse(response, content_type="application/json")
def Airflow(request):
try:
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
context = {'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,
'selected': 'monitoring', 'airflow': True}
return render(request, 'upload.html', context)
except:
        return render(request, 'upload.html', {'error':'Failed to load the Airflow page'})
def Results(request):
    return render(request, 'modeltraning.html', {'version': AION_VERSION})
def uploadnext(request):
return render(request, 'basicconfig.html', {'selected': 'modeltraning','version':AION_VERSION})
def basicconfignext(request):
from appbe import advance_Config as ac
context = ac.basicconfignex(request)
|