__init__.py
null
__init__.py
null
__init__.py
null
__init__.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
incBatchLearning.py
import sys import os import pickle import json import timeit import warnings import traceback import logging from pathlib import Path warnings.filterwarnings("ignore") import numpy as np import pandas as pd import matplotlib.pyplot as plt from pandas import json_normalize import shutil from word2number import w2n from pytz import timezone import datetime from sklearn.model_selection import train_test_split from sklearn.metrics import roc_auc_score, accuracy_score, r2_score,mean_absolute_error, mean_squared_error, recall_score, precision_score, f1_score from river import stream class incBatchLearner(): def __init__(self): self.home = os.path.dirname(os.path.abspath(__file__)) self.configPath = os.path.join(self.home, 'production', 'Config.json') self.configDict = {} self.updConfigDict = None self.incFillPath = os.path.join(self.home,'production','profiler','incFill.pkl') self.incOutlierRemPath = os.path.join(self.home, 'production', 'profiler', 'incOutlierRem.pkl') self.incLabelMappingPath = os.path.join(self.home,'production', 'profiler' , 'incLabelMapping.pkl') self.incCatEncoderPath = os.path.join(self.home, 'production' , 'profiler', 'incCatEncoder.pkl') self.incScalerPath = os.path.join(self.home, 'production', 'profiler','incScaler.pkl') self.testPath = os.path.join(self.home, 'data', 'test.csv') self.modelName = '' self.incFill = None self.incLabelMapping = None self.incCatEncoder = None self.incScaler = None self.incOutlierRem = None self.model = None self.targetCol = None self.numFtrs = [] self.catFtrs = [] self.allFtrs = [] self.logFileName=os.path.join(self.home,'log','model_training_logs.log') filehandler = logging.FileHandler(self.logFileName, 'a','utf-8') formatter = logging.Formatter('%(message)s') filehandler.setFormatter(formatter) self.log = logging.getLogger('eion') self.log.propagate = False self.log.addHandler(filehandler) self.log.setLevel(logging.INFO) def readData(self, data, isTest = False): if not isTest: self.log.info('New Data Path: '+str(data)) else: self.log.info('Test Data Path: '+str(data)) startTime = timeit.default_timer() if os.path.splitext(data)[1] == ".tsv": df=pd.read_csv(data,encoding='utf-8',sep='\t') elif os.path.splitext(data)[1] == ".csv": df=pd.read_csv(data,encoding='utf-8') elif os.path.splitext(data)[1] == ".dat": df=pd.read_csv(data,encoding='utf-8') else: if os.path.splitext(data)[1] == ".json": with open(data,'r',encoding='utf-8') as f: jsonData = json.load(f) else: jsonData = json.loads(data) df = json_normalize(jsonData) dataLoadTime = timeit.default_timer() - startTime self.log.info('\nData Load time(sec) :'+str(dataLoadTime)) self.log.info('\n First ten rows of new data') self.log.info(df.head(10)) self.log.info('Data Frame shape: '+str(df.shape)) df.rename(columns=lambda x:x.strip(), inplace=True) return df def readConfig(self): with open(self.configPath, 'r', encoding= 'utf8') as f: self.configDict = json.load(f) self.configDict['partialFit']+=1 self.log.info('************* Partial Fit '+str(self.configDict['partialFit'])+' *************** \n') msg = '-------> Execution Start Time: '+ datetime.datetime.now(timezone("Asia/Kolkata")).strftime('%Y-%m-%d %H:%M:%S' + ' IST') self.log.info(msg) self.targetCol = self.configDict['targetCol'] if 'numFtrs' in self.configDict: self.numFtrs = self.configDict['numFtrs'] if 'catFtrs' in self.configDict: self.catFtrs = self.configDict['catFtrs'] if 'allNumCols' in self.configDict: self.allNumCols = self.configDict['allNumCols'] if 'allCatCols' in self.configDict: self.allCatCols = 
self.configDict['allCatCols'] if 'encCols' in self.configDict: self.encCols = self.configDict['encCols'] if 'wordToNumCols' in self.configDict: self.wordToNumericCols = self.configDict['wordToNumCols'] self.emptyFtrs = self.configDict['emptyFtrs'] if 'encTarget' in self.configDict: self.encTarget = self.configDict['encTarget'] if 'noOfClasses' in self.configDict: self.allClasses = list(range(int(self.configDict['noOfClasses']))) self.misval_ratio = self.configDict['misval_ratio'] self.allFtrs = self.configDict['allFtrs'] self.modelName = self.configDict['modelName'] self.problemType = self.configDict['problemType'] self.modelPath = os.path.join(self.home, 'production', 'model', self.modelName+'.pkl') self.scoreParam = self.configDict['scoreParam'] self.score = self.configDict['score'] def pickleLoad(self, file, filename): if os.path.exists(file): with open(file, 'rb') as f: model = pickle.load(f) file_size = os.path.getsize(file) self.log.info(str(filename)+" size is :"+str(file_size)+"bytes") return model else: return None def s2n(self,value): try: x=eval(value) return x except: try: return w2n.word_to_num(value) except: return np.nan def convertWordToNumeric(self,dataframe,feature): try: dataframe[feature]=dataframe[feature].apply(lambda x: self.s2n(x)) return dataframe except Exception as inst: self.log.info("convertWordToNumeric Failed ===>"+str(inst)) return dataframe def pickleDump(self, model, path): if model is not None: with open(path, 'wb') as f: pickle.dump(model, f) def splitTrainTest(self,X,y): if self.problemType.lower() == 'regression': xtrain,xtest,ytrain,ytest=train_test_split(X,y,test_size=0.2,shuffle=True) else: try: xtrain,xtest,ytrain,ytest=train_test_split(X,y,stratify=y,test_size=0.2,shuffle=True) except: xtrain,xtest,ytrain,ytest=train_test_split(X,y,test_size=0.2,shuffle=True) return xtrain,xtest,ytrain,ytest def loadSavedModels(self): self.incFill = self.pickleLoad(self.incFillPath, 'Online Missing Value Filler') self.incLabelMapping = self.pickleLoad(self.incLabelMappingPath, 'Online Label Encoder') self.incCatEncoder = self.pickleLoad(self.incCatEncoderPath, 'Online Categorical Encoder') self.incScaler = self.pickleLoad(self.incScalerPath, 'Online Scaler') self.incOutlierRem = self.pickleLoad(self.incOutlierRemPath, 'Online Outlier Detector') self.model = self.pickleLoad(self.modelPath, str(os.path.basename(self.modelPath))[:-4]) self.log.info('\nData Profiler and ML models loaded in Memory') def saveModels(self): os.makedirs(os.path.join(self.home, 'production', 'profiler')) os.makedirs(os.path.join(self.home, 'production', 'model')) if type(self.configDict['num_fill']) == type({}) or type(self.configDict['cat_fill']) == type({}): self.pickleDump(self.incFill, self.incFillPath) self.pickleDump(self.incLabelMapping, self.incLabelMappingPath) self.pickleDump(self.incCatEncoder, self.incCatEncoderPath) self.pickleDump(self.incScaler, self.incScalerPath) self.pickleDump(self.incOutlierRem, self.incOutlierRemPath) self.pickleDump(self.model, self.modelPath) self.log.info('Models saved into production') def saveConfig(self): with open(self.configPath, 'w', encoding= 'utf8') as f: json.dump(self.updConfigDict, f, ensure_ascii=False) def apply_river_model(self, x, profModel, isTest): if not isTest: profModel.learn_one(x) return pd.Series(profModel.transform_one(x)) def apply_enc(self, x, isTest): if not isTest: y = x[self.encTarget] self.incCatEncoder.learn_one(x, y) return pd.Series(self.incCatEncoder.transform_one(x)) def apply_od_pipe(self, x): score = 
self.incOutlierRem.score_one(x) is_anomaly = self.incOutlierRem.classify(score) self.incOutlierRem.learn_one(x) return is_anomaly def dataFramePreProcess(self, df): df = df.replace('-', np.NaN) df = df.replace('?', np.NaN) df = df.replace(r'^\s*$', np.NaN, regex=True) columns = list(df.columns) if self.wordToNumericCols: for ftr in self.wordToNumericCols: if ftr in columns: self.log.info('Converting '+ftr+' to numeric type...') tempDataFrame=df.copy(deep=True) testDf = self.convertWordToNumeric(tempDataFrame,ftr) try: df[ftr]=testDf[ftr].astype(float) except: pass columns = list(df.columns) for empCol in self.emptyFtrs: if empCol in columns: df = df.drop(columns=[empCol]) columns = list(df.columns) self.log.info( 'Detecting Missing Values') nonNAArray=[] numOfRows = df.shape[0] for i in columns: numNa=df.loc[(pd.isna(df[i])),i ].shape[0] nonNAArray.append(tuple([i,numNa])) self.missingCols = [] self.emptyCols = [] for item in nonNAArray: numofMissingVals = item[1] if(numofMissingVals !=0): self.log.info('-------> Feature '+str(item[0])) self.log.info('----------> Number of Empty Rows '+str(numofMissingVals)) self.missingCols.append(item[0]) if(numofMissingVals >= numOfRows * self.misval_ratio): self.log.info('----------> Empty: Yes') self.log.info('----------> Permitted Rows: '+str(int(numOfRows * self.misval_ratio))) self.emptyCols.append(item[0]) if(len(self.missingCols) !=0): self.log.info( '----------- Detecting for Missing Values End -----------\n') else: self.log.info( '-------> Missing Value Features :Not Any') self.log.info( '----------- Detecting for Missing Values End -----------\n') return df def profiler(self, df, isTest=False): if not isTest: self.log.info('Starting profiling of New Training Data') else: self.log.info('Starting profiling of Testing Data') startTime = timeit.default_timer() df = self.dataFramePreProcess(df) if 'num_fill' in self.configDict: if self.configDict['num_fill'] == 'drop': df = df.dropna(axis = 0, subset=self.allNumCols) elif self.configDict['num_fill'] == 'zero': df[self.allNumCols] = df[self.allNumCols].fillna(value = 0.0) else: df[self.allNumCols]= df[self.allNumCols].apply(pd.to_numeric) df = df.astype(object).where(df.notna(), None) #river expects nan values to be None df[self.allNumCols]= df[self.allNumCols].apply(lambda row: self.apply_river_model(row.to_dict(), self.incFill['num_fill'], isTest), axis='columns') if not isTest: self.updConfigDict['num_fill'] = {col:self.incFill['num_fill'].stats[col].get() for col in self.allNumCols} if 'cat_fill' in self.configDict: if self.configDict['cat_fill'] == 'drop': df = df.dropna(axis = 0, subset=self.allCatCols) elif self.configDict['cat_fill'] == 'zero': df[self.allCatCols] = df[self.allCatCols].fillna(value = 0.0) else: df = df.astype(object).where(df.notna(), None) df[self.allCatCols]= df[self.allCatCols].apply(lambda row: self.apply_river_model(row.to_dict(),self.incFill['cat_fill'], isTest), axis='columns') if not isTest: self.updConfigDict['cat_fill'] = {col:self.incFill['cat_fill'].stats[col].get() for col in self.allCatCols} if not isTest: self.log.info('Missing value profiler model updated') if self.incLabelMapping: uq_classes = df[self.targetCol].unique() le_classes = list(self.incLabelMapping.classes_) uq_classes = [type(le_classes[0])(x) for x in uq_classes] unseen_classes = set(uq_classes) - set(le_classes) self.log.info('Existing classes: '+str(le_classes)) if len(unseen_classes)>0: self.log.info('New unseen classes: '+str(unseen_classes)) le_classes.extend(unseen_classes) from 
sklearn.preprocessing import LabelEncoder self.incLabelMapping = LabelEncoder() self.incLabelMapping.fit(le_classes) self.log.info(self.incLabelMapping.classes_) self.log.info('Label encoder refitted with new unseen classes') df[self.targetCol] = df[self.targetCol].apply(str) df[self.targetCol] = self.incLabelMapping.transform(df[self.targetCol]) if not isTest: self.log.info('Target column label encoding is done') if self.incCatEncoder: if self.problemType.lower() == 'regression': from sklearn.preprocessing import StandardScaler sc = StandardScaler() self.encTarget = 'scaledTarget' df['scaledTarget'] = sc.fit_transform(df[self.targetCol].to_numpy().reshape(-1,1)) transformed_data = df[self.encCols].apply(lambda row: self.apply_enc(row.to_dict(), isTest), axis='columns') if self.targetCol in transformed_data.columns: transformed_data.drop(self.targetCol, inplace=True, axis = 1) df[self.catFtrs] = transformed_data if not isTest: self.updConfigDict['catEnc'] = [] if len(self.catFtrs) == 1: col = self.catFtrs[0] self.configDict['catEnc'].append({col:self.incCatEncoder['TargetAgg'].state.to_dict()}) else: for i, col in enumerate(self.catFtrs): if i==0: no = '' else: no = str(i) self.configDict['catEnc'].append({col:self.incCatEncoder['TransformerUnion']['TargetAgg'+no].state.to_dict()}) self.log.info('Categorical encoding is done and profiler model updated') if self.incScaler: if not isTest: self.incScaler = self.incScaler.partial_fit(df[self.numFtrs]) self.log.info('Numerical features scaled and profiler model updated') df[self.numFtrs] = self.incScaler.transform(df[self.numFtrs]) if self.incOutlierRem and not isTest: df = df[df[self.numFtrs].apply(lambda x: False if self.apply_od_pipe(x.to_dict()) else True, axis=1)] df.reset_index(drop=True, inplace=True) self.log.info('Outliers removed and profiler model updated') if not isTest: self.log.info('Check config file in production folder for updated profiler values') profilerTime = timeit.default_timer() - startTime self.log.info('\nProfiling time(sec) :'+str(profilerTime)) return df def riverTrain(self, X, Y): trainStream = stream.iter_pandas(X, Y) for i, (xi, yi) in enumerate(trainStream): if yi!=None: self.model.learn_one(xi, yi) def riverEvaluate(self, xtest): testStream = stream.iter_pandas(xtest) preds = [] for xi,yi in testStream: pred = self.model.predict_one(xi) preds.append(pred) return preds def trainModel(self,df): startTime = timeit.default_timer() X = df[self.allFtrs] Y = df[self.targetCol] try: self.riverTrain(X,Y) trainTime = timeit.default_timer() - startTime self.log.info('\nModel Training time(sec) :'+str(trainTime)) self.log.info(self.modelName+' model updated') self.log.info('First fit model params are '+str(self.configDict['modelParams'])) except Exception as e: raise e def archiveModels(self): source = os.path.join(self.home, 'production') archivePath = os.path.join(self.home,'archives') if os.path.isdir(archivePath): NoOfArchives = sum(os.path.isdir(os.path.join(self.home,'archives',str(i))) for i in os.listdir(archivePath)) destination = os.path.join(self.home,'archives',str(NoOfArchives+1)) else: destination = os.path.join(archivePath,'1') if not os.path.exists(destination): os.makedirs(destination) allfiles = os.listdir(source) for f in allfiles: src_path = os.path.join(source, f) dst_path = os.path.join(destination, f) shutil.move(src_path, dst_path) self.log.info('Previous production models archived') def get_score(self,metric,actual,predict): if 'accuracy' in str(metric).lower(): score = 
accuracy_score(actual,predict) score = score*100 elif 'recall' in str(metric).lower(): score = recall_score(actual,predict,average='macro') score = score*100 elif 'precision' in str(metric).lower(): score = precision_score(actual,predict,average='macro') score = score*100 elif 'f1_score' in str(metric).lower(): score = f1_score(actual,predict, average='macro') score = score*100 elif 'roc_auc' in str(metric).lower(): try: score = roc_auc_score(actual,predict,average="macro") except: try: actual = pd.get_dummies(actual) predict = pd.get_dummies(predict) score = roc_auc_score(actual,predict, average='weighted', multi_class='ovr') except: score = 0 score = score*100 elif ('mse' in str(metric).lower()) or ('neg_mean_squared_error' in str(metric).lower()): score = mean_squared_error(actual,predict) elif ('rmse' in str(metric).lower()) or ('neg_root_mean_squared_error' in str(metric).lower()): score=mean_squared_error(actual,predict,squared=False) elif ('mae' in str(metric).lower()) or ('neg_mean_absolute_error' in str(metric).lower()): score=mean_absolute_error(actual,predict) elif 'r2' in str(metric).lower(): score=r2_score(actual,predict)*100 return round(score,2) def checkColumns(self, df): self.log.info('Checking DataColumns in new data') dfCols = list(df.columns) allCols = self.allFtrs.copy() allCols.append(self.targetCol) missingCols = [] for col in allCols: if col not in dfCols: missingCols.append(col) if len(missingCols)>0: raise Exception('DataFrame is missing columns: '+str(missingCols)) else: self.log.info('All required columns are present: '+str(list(dfCols)[:500])) def plotMetric(self): y = self.configDict['metricList'] fedrows = self.configDict['trainRowsList'] fig = plt.figure() ax = fig.gca() if self.configDict['problemType'] == 'classification': ax.set_yticks(np.arange(0, 110, 10)) plt.ylim(ymin=0) if self.configDict['problemType'] == 'regression': minMet = min(y) maxMet = max(y) plt.ylim(minMet - 10, maxMet+10) plt.plot(y) plt.ylabel(self.scoreParam) plt.xlabel('Partial Fits') plt.title(str(self.scoreParam)+' over training rows') if type(fedrows[0])!=type(''): fedrows = [str(x) for x in fedrows] x = list(range(len(fedrows))) for i in range(len(fedrows)): plt.annotate(fedrows[i], (x[i], y[i] + 5)) if self.configDict['problemType'] == 'classification': plt.annotate(round(y[i],1), (x[i], y[i] - 3)) plt.grid() plt.savefig(os.path.join(self.home, 'production','metric')) return def updateLearning(self,data): try: self.readConfig() self.updConfigDict = self.configDict.copy() df = self.readData(data) self.checkColumns(df) self.loadSavedModels() X = df[self.allFtrs] y = df[self.targetCol] xtrain,xtest,ytrain,ytest = self.splitTrainTest(X,y) dftrain = pd.concat((xtrain, ytrain), axis = 1) dftest = pd.concat((xtest, ytest), axis = 1) dftrain = self.profiler(dftrain) dftest = self.profiler(dftest, isTest = True) xtest = dftest[self.allFtrs] ytest = dftest[self.targetCol] self.trainModel(dftrain) preds = self.riverEvaluate(xtest) score = self.get_score(self.scoreParam, ytest, preds) self.updConfigDict['score'] = score self.log.info('Previous '+self.scoreParam+': '+str(self.configDict['score'])) self.log.info('Current '+self.scoreParam+': '+str(self.updConfigDict['score'])) self.configDict['trainRowsList'].append(self.configDict['trainRowsList'][-1]+xtrain.shape[0]) self.log.info('Number of data points trained on so far: '+str(self.configDict['trainRowsList'][-1])) self.configDict['metricList'].append(self.updConfigDict['score']) self.archiveModels() self.plotMetric() self.saveModels() 
self.saveConfig() msg = self.scoreParam+': Previous:'+str(self.configDict['score'])+' Current:'+ str(self.updConfigDict['score']) output = {"status":"SUCCESS","Msg":msg} self.log.info(str(output)) except Exception as e: print(traceback.format_exc()) self.log.info('Partial Fit Failed '+str(traceback.format_exc())) if self.updConfigDict != None: self.saveConfig() output = {"status":"FAIL","Msg":str(e).strip('"')} return json.dumps(output) if __name__ == "__main__": incBLObj = incBatchLearner() output = incBLObj.updateLearning(sys.argv[1]) print("aion_learner_status:",output)
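incBatchLearner.riverTrain and riverEvaluate above follow river's one-record-at-a-time pattern: iterate the DataFrame with stream.iter_pandas, then call learn_one / predict_one on each record. The snippet below is a minimal, self-contained sketch of that pattern; the toy data, the StandardScaler | LogisticRegression pipeline and the Accuracy metric are illustrative assumptions, not the production profiler or model.

    # Minimal sketch of the river streaming pattern used by riverTrain/riverEvaluate.
    # The toy data, pipeline and metric are illustrative assumptions only.
    import pandas as pd
    from river import stream, linear_model, preprocessing, metrics

    X = pd.DataFrame({'f1': [0.1, 0.4, 0.35, 0.8], 'f2': [1.0, 0.0, 1.0, 0.0]})
    y = pd.Series([0, 0, 1, 1], name='target')

    model = preprocessing.StandardScaler() | linear_model.LogisticRegression()
    acc = metrics.Accuracy()

    for xi, yi in stream.iter_pandas(X, y):   # one (feature dict, label) pair per row
        if yi is not None:
            y_pred = model.predict_one(xi)    # predict before learning (prequential evaluation)
            acc.update(yi, y_pred)
            model.learn_one(xi, yi)           # incremental update, no full refit

    print('running accuracy:', acc)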
incBatchPrediction.py
import sys import os import pickle import json import traceback import warnings warnings.filterwarnings("ignore") import numpy as np import pandas as pd import scipy from pandas import json_normalize from word2number import w2n from river import stream class incBatchPredictor(): def __init__(self): self.home = os.path.dirname(os.path.abspath(__file__)) self.configPath = os.path.join(self.home, 'production', 'Config.json') self.configDict = {} self.incFillPath = os.path.join(self.home,'production','profiler','incFill.pkl') self.incLabelMappingPath = os.path.join(self.home,'production', 'profiler' , 'incLabelMapping.pkl') self.incCatEncoderPath = os.path.join(self.home, 'production' , 'profiler', 'incCatEncoder.pkl') self.incScalerPath = os.path.join(self.home, 'production', 'profiler','incScaler.pkl') self.incFill = None self.incLabelMapping = None self.incCatEncoder = None self.incScaler = None self.model = None self.targetCol = None self.modelName = '' self.problemType = '' self.numFtrs = [] self.catFtrs = [] def readData(self, data): try: if os.path.splitext(data)[1] == ".tsv": df=pd.read_csv(data,encoding='utf-8',sep='\t') elif os.path.splitext(data)[1] == ".csv": df=pd.read_csv(data,encoding='utf-8') elif os.path.splitext(data)[1] == ".dat": df=pd.read_csv(data,encoding='utf-8') else: if os.path.splitext(data)[1] == ".json": with open(data,'r',encoding='utf-8') as f: jsonData = json.load(f) else: jsonData = json.loads(data) df = json_normalize(jsonData) df.rename(columns=lambda x:x.strip(), inplace=True) return df except KeyError as e: output = {"status":"FAIL","message":str(e).strip('"')} print(json.dumps(output)) except Exception as e: output = {"status":"FAIL","message":str(e).strip('"')} print(json.dumps(output)) def readConfig(self): with open(self.configPath, 'r', encoding= 'utf8') as f: self.configDict = json.load(f) self.targetCol = self.configDict['targetCol'] if 'numFtrs' in self.configDict: self.numFtrs = self.configDict['numFtrs'] if 'catFtrs' in self.configDict: self.catFtrs = self.configDict['catFtrs'] if 'allNumCols' in self.configDict: self.allNumCols = self.configDict['allNumCols'] if 'allCatCols' in self.configDict: self.allCatCols = self.configDict['allCatCols'] if 'wordToNumCols' in self.configDict: self.wordToNumericCols = self.configDict['wordToNumCols'] self.emptyFtrs = self.configDict['emptyFtrs'] self.allFtrs = self.configDict['allFtrs'] self.modelName = self.configDict['modelName'] self.problemType = self.configDict['problemType'] self.modelPath = os.path.join(self.home, 'production', 'model', self.modelName+'.pkl') self.scoreParam = self.configDict['scoreParam'] self.score = self.configDict['score'] def pickleLoad(self, file): if os.path.exists(file): with open(file, 'rb') as f: model = pickle.load(f) return model else: return None def s2n(self,value): try: x=eval(value) return x except: try: return w2n.word_to_num(value) except: return np.nan def convertWordToNumeric(self,dataframe,feature): try: dataframe[feature]=dataframe[feature].apply(lambda x: self.s2n(x)) return dataframe except Exception as inst: self.log.info("convertWordToNumeric Failed ===>"+str(inst)) return dataframe def loadSavedModels(self): self.incFill = self.pickleLoad(self.incFillPath) self.incLabelMapping = self.pickleLoad(self.incLabelMappingPath) self.incCatEncoder = self.pickleLoad(self.incCatEncoderPath) self.incScaler = self.pickleLoad(self.incScalerPath) self.model = self.pickleLoad(self.modelPath) def apply_river_model(self, x, profModel): print(profModel.imputers) return 
pd.Series(profModel.transform_one(x)) def apply_enc(self, x): return pd.Series(self.incCatEncoder.transform_one(x)) def dataFramePreProcess(self, df): df = df.replace(r'^\s*$', np.NaN, regex=True) df = df.replace('-', np.nan) df = df.replace('?', np.nan) columns = list(df.columns) if self.wordToNumericCols: for ftr in self.wordToNumericCols: if ftr in columns: tempDataFrame=df.copy(deep=True) testDf = self.convertWordToNumeric(tempDataFrame,ftr) try: df[ftr]=testDf[ftr].astype(float) except: pass columns = list(df.columns) for empCol in self.emptyFtrs: if empCol in columns: df = df.drop(columns=[empCol]) return df def profiler(self, df): df = df[self.allFtrs] df = self.dataFramePreProcess(df) if 'num_fill' in self.configDict: if self.configDict['num_fill'] == 'drop': df = df.dropna(axis = 0, subset=self.allNumCols) elif self.configDict['num_fill'] == 'zero': df[self.numFtrs] = df[self.numFtrs].fillna(value = 0.0) else: for x in self.numFtrs: if x == self.targetCol: continue df[x] = df[x].fillna(value = self.configDict['num_fill'][x]) if 'cat_fill' in self.configDict: if self.configDict['cat_fill'] == 'drop': df = df.dropna(axis = 0, subset=self.allCatCols) elif self.configDict['cat_fill'] == 'zero': df[self.catFtrs] = df[self.catFtrs].fillna(value = 0.0) else: for x in self.catFtrs: if x == self.targetCol: continue df[x] = df[x].fillna(value = self.configDict['cat_fill'][x]) if self.incCatEncoder: transformed_data = df[self.catFtrs].apply(lambda row: self.apply_enc(row.to_dict()), axis='columns') df[self.catFtrs] = transformed_data if self.incScaler: df[self.numFtrs] = self.incScaler.transform(df[self.numFtrs]) return df def trainedModel(self,X): testStream = stream.iter_pandas(X) preds = [] if self.problemType.lower() == 'regression': for xi,yi in testStream: try: pred = self.model.predict_proba_one(xi) preds.append(pred) except: pred = self.model.predict_one(xi) preds.append(pred) preds = pd.DataFrame(preds) return preds elif self.problemType.lower() == 'classification': for xi,yi in testStream: try: pred = self.model.predict_proba_one(xi) preds.append(pred) except: continue out = pd.DataFrame(preds) return out def apply_output_format(self,df,modeloutput): if self.problemType.lower() == 'regression': df['prediction'] = modeloutput[0] df['prediction'] = df['prediction'].round(2) elif self.problemType.lower() == 'classification': modeloutput = round(modeloutput,2) if modeloutput.shape[1] == 1: df['prediction'] = modeloutput df['prediction'] = df['prediction'].astype(int) else: try: predCol = modeloutput.idxmax(axis=1) df['prediction'] = predCol.astype(int) df['prediction'] = self.incLabelMapping.inverse_transform(df['prediction']) except: df['prediction'] = modeloutput.idxmax(axis=1) df['probability'] = modeloutput.max(axis=1).round(2) modeloutput.columns = modeloutput.columns.astype(int) modeloutput.columns = self.incLabelMapping.inverse_transform(list(modeloutput.columns)) df['remarks'] = modeloutput.apply(lambda x: x.to_json(), axis=1) outputjson = df.to_json(orient='records') outputjson = {"status":"SUCCESS","data":json.loads(outputjson)} return(json.dumps(outputjson)) def predict(self,data): try: df = self.readData(data) dfOrg = df.copy() self.readConfig() if len(self.configDict)!=0: self.loadSavedModels() df = self.profiler(df) modeloutput = self.trainedModel(df) dfOrg = dfOrg[self.allFtrs] output = self.apply_output_format(dfOrg, modeloutput) else: pass except Exception as e: print(traceback.format_exc()) output = {"status":"FAIL","message":str(e).strip('"')} return output if 
__name__ == "__main__": incBPobj = incBatchPredictor() output = incBPobj.predict(sys.argv[1]) print("predictions:",output)
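For classification, apply_output_format above converts the model's per-class probabilities into a predicted label, a probability column and a remarks column. Below is a hedged sketch of that mapping with made-up class names and probabilities; the real label mapping comes from the pickled incLabelMapping, for which a freshly fitted LabelEncoder stands in here.

    # Sketch of the probability-to-label mapping done in apply_output_format.
    # Class names and probability values below are made up for illustration.
    import json
    import pandas as pd
    from sklearn.preprocessing import LabelEncoder

    le = LabelEncoder().fit(['no', 'yes'])              # stand-in for incLabelMapping
    proba = pd.DataFrame([[0.91, 0.09], [0.30, 0.70]])  # columns = encoded classes 0, 1

    out = pd.DataFrame()
    out['prediction'] = le.inverse_transform(proba.idxmax(axis=1).astype(int))
    out['probability'] = proba.max(axis=1).round(2)
    proba.columns = le.inverse_transform(list(proba.columns.astype(int)))
    out['remarks'] = proba.apply(lambda row: row.to_json(), axis=1)

    print(json.dumps({"status": "SUCCESS", "data": json.loads(out.to_json(orient='records'))}))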
__init__.py
null
dl_model.py
import tensorflow as tf


def dl_regression_model(input_shape, output_shape, optimizer, loss_func, act_func):
    inputs = tf.keras.Input(shape=(input_shape,))
    x = tf.keras.layers.Dense(64, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(inputs)
    x = tf.keras.layers.Dense(32, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    x = tf.keras.layers.Dense(16, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    x = tf.keras.layers.Dense(8, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    outputs = tf.keras.layers.Dense(output_shape, kernel_initializer='he_normal', bias_initializer='zeros')(x)
    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    model.compile(loss=loss_func, optimizer=optimizer, metrics=["mean_absolute_error", "mean_squared_error"])
    return model


def dl_multiClass_classification(input_shape, output_shape, optimizer, loss_func, act_func, last_act_func):
    inputs = tf.keras.Input(shape=(input_shape,))
    x = tf.keras.layers.Dense(64, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(inputs)
    x = tf.keras.layers.Dense(32, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    x = tf.keras.layers.Dense(16, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    x = tf.keras.layers.Dense(8, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    outputs = tf.keras.layers.Dense(output_shape, kernel_initializer='he_normal', bias_initializer='zeros', activation=last_act_func)(x)
    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer, loss_func, metrics=["accuracy"])
    return model


def dl_binary_classification(input_shape, output_shape, optimizer, loss_func, act_func, last_act_func):
    inputs = tf.keras.Input(shape=(input_shape,))
    x = tf.keras.layers.Dense(64, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(inputs)
    x = tf.keras.layers.Dense(32, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    x = tf.keras.layers.Dense(16, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    x = tf.keras.layers.Dense(8, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    outputs = tf.keras.layers.Dense(output_shape, kernel_initializer='he_normal', bias_initializer='zeros', activation=last_act_func)(x)
    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer, loss_func, metrics=["accuracy"])
    return model
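A short usage sketch for the factory functions above, assuming dl_model.py is importable from the working directory; the synthetic data and the optimizer, loss and activation choices are placeholders, not AION defaults.

    # Usage sketch for dl_model.dl_binary_classification on synthetic data.
    # Hyperparameter values are placeholders.
    import numpy as np
    import dl_model

    X = np.random.rand(128, 10).astype('float32')               # 128 rows, 10 features
    y = np.random.randint(0, 2, size=(128,)).astype('float32')  # binary target

    model = dl_model.dl_binary_classification(
        input_shape=10, output_shape=1,
        optimizer='adam', loss_func='binary_crossentropy',
        act_func='relu', last_act_func='sigmoid')

    model.fit(X, y, epochs=2, batch_size=32, verbose=0)
    print(model.evaluate(X, y, verbose=0))                      # [loss, accuracy]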
aionflc.py
# -*- coding: utf-8 -*- """ Created on Wed May 25 21:16:54 2022 @author: @aionteam """ import tensorflow as tf import warnings import flwr as flower import numpy as np import pandas as pd import os from sklearn.linear_model import LogisticRegression from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, mean_absolute_error,r2_score from sklearn.model_selection import train_test_split from sklearn.metrics import log_loss import utils import logging from flwr.common.logger import log from logging import INFO import time import pickle as pkl import json import sys import random import string from sklearn.preprocessing import StandardScaler import dl_model from sklearn import metrics ## Below import can be used when aion specific grpc communication used. # from aiongrpcclient import aiongrpcclient os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" os.environ["GRPC_VERBOSITY"] = "debug" logger = logging.getLogger('AION') """ The below aion fl client is for sklearn process""" class aionflc(flower.client.NumPyClient): def __init__(self,model,num_rounds,model_name,version,wait_time,client_id,num_records,model_overwrite,problem_type,X_train, X_test, y_train, y_test): self.count=0 self.num_rounds=round(num_rounds) self.model_name=model_name self.version=version self.wait_time=int(wait_time) self.client_id=client_id self.num_records=num_records self.model_overwrite=model_overwrite self.model=model self.problem_type=problem_type self.X_train, self.X_test, self.y_train, self.y_test=X_train, X_test, y_train, y_test # """ The below part not used now. In future, for our own grpc communication, this module will be used.Call this function where we want. Need to modify aiongrpcproto.proto according our requirement.""" # def callaiongrpcclient(self): # clientins = aiongrpcclient() # status=clientins.startgrpcclient() # return status #Save the final model def model_save(self,model): ##Locate standard model dir to save model cwd = os.path.abspath(os.path.dirname(__file__)) model_location=os.path.join(cwd, 'models') try: os.makedirs(model_location) except FileExistsError as fe: # here,model_location already exists pass model_name=self.model_name ## Saving model if (self.model_overwrite.lower() == 'false'): version=str(self.count) if (model_name.lower() == "deeplearning"): file_name=model_name+'_'+self.problem_type+'_'+version+".h5" saved_model=os.path.normpath(os.path.join(model_location,file_name)) log(INFO, "flclient saved_model path: %s ",str(saved_model)) try: model.save(saved_model) return True except Exception as e: logger.info("model save error. Err.Msg: "+str(e)) return False else: file_name=model_name+'_'+self.problem_type+'_'+version+".sav" saved_model=os.path.normpath(os.path.join(model_location,file_name)) log(INFO, "flclient saved_model path: %s ",str(saved_model)) try: with open (saved_model,'wb') as f: pkl.dump(model,f) return True except Exception as e: logger.info("model save error. Err.Msg: "+str(e)) return False elif (self.model_overwrite.lower() == 'true'): version=str(self.version) if (model_name.lower() == "deeplearning"): file_name=model_name+'_'+self.problem_type+'_'+version+".h5" saved_model=os.path.normpath(os.path.join(model_location,file_name)) log(INFO, "flclient saved_model path: %s ",str(saved_model)) try: model.save(saved_model) return True except Exception as e: logger.info("model save error. 
Err.Msg: "+str(e)) return False else: file_name=model_name+'_'+self.problem_type+'_'+version+".sav" saved_model=os.path.normpath(os.path.join(model_location,file_name)) log(INFO, "flclient saved_model path: %s ",str(saved_model)) try: with open (saved_model,'wb') as f: pkl.dump(model,f) return True except Exception as e: logger.info("model save error. Err.Msg: "+str(e)) return False else: ##Write own user instruction pass def get_parameters(self, config): return utils.get_model_parameters(self.model) def get_properties(self,model,time_out): """Return the current client properties.""" client_info={'client_id':self.client_id} time_out=100 return client_info,model,time_out def fit(self, parameters, config): utils.set_model_params(self.model, parameters) with warnings.catch_warnings(): warnings.simplefilter("ignore") num_partitions=round(self.num_rounds) # num_partitions=round(5) xtrain=np.array_split(self.X_train, num_partitions)[self.count] ytrain=np.array_split(self.y_train, num_partitions)[self.count] self.model.fit(xtrain, ytrain) time.sleep(self.wait_time) self.count+=1 print("-- Received Weights from Server.") print(f"\n Training finished for FL round: {config['rnd']}.\n") logger.info("-- Received Weights from Server. ") logger.info("Training finished for FL round: "+str(config['rnd'])+" -- Received Weights from Server") model_param=utils.get_model_parameters(self.model) model_param=list(model_param) return model_param, len(self.X_train),{} # def evaluate(self, parameters, config): # utils.set_model_params(self.model, parameters) # print("******** Test_1 ****************** \n") # if (self.problem_type.lower() == 'classification'): # if (self.model_name.lower() == 'logisticregression' ): # loss = log_loss(self.y_test, self.model.predict_proba(self.X_test)) # print("******** Test_1a ****************** \n") # else: # if (self.model_name.lower() == 'linearregression' ): # print("******** Test_1b ****************** \n") # # loss = log_loss(self.y_test, self.model.predict(self.X_test)) # rmse = np.sqrt(mean_squared_error(self.y_test, self.model.predict(self.X_test))) # mae = mean_absolute_error(self.y_test, self.model.predict(self.X_test)) # r2=r2_score(self.y_test, self.model.predict(self.X_test)) # loss = rmse # # accuracy=r2 # print(f"{self.client_id} Sending weights -- data processed {self.num_records}, -- Loss: {(rmse)}. -- r2: {r2}. ") # logger.info(str(self.client_id)+" Sending weights -- data processed "+str(self.num_records)+".-- Loss: "+str(rmse)+". -- r2: "+str(r2)) # logger.info("FL Client model intercept: "+str(model.intercept_)) # logger.info("FL Client model coefficients: "+str(model.coef_)) # self.model_save(self.model) # return loss, len(self.X_test), {"r2": r2} # print("******** Test_1c ****************** \n") # print("******** Test_2 ****************** \n") # accuracy = self.model.score(self.X_test, self.y_test) # print(f"{self.client_id} Sending weights -- data processed {self.num_records}, -- Loss: {(loss)}. -- accuracy: {accuracy}. ") # logger.info(str(self.client_id)+" Sending weights -- data processed "+str(self.num_records)+".-- Loss: "+str(loss)+". 
-- accuracy: "+str(accuracy)) # logger.info("FL Client model intercept: "+str(model.intercept_)) # logger.info("FL Client model coefficients: "+str(model.coef_)) # self.model_save(self.model) # return loss, len(self.X_test), {"accuracy": accuracy} def evaluate(self, parameters, config): utils.set_model_params(self.model, parameters) if (self.problem_type.lower() == 'classification'): if (self.model_name.lower() == 'logisticregression' ): loss = log_loss(self.y_test, self.model.predict_proba(self.X_test)) accuracy = self.model.score(self.X_test, self.y_test) print(f"{self.client_id} Sending weights -- data processed {self.num_records}, -- Loss: {(loss)}. -- accuracy: {accuracy}. ") logger.info(str(self.client_id)+" Sending weights -- data processed "+str(self.num_records)+".-- Loss: "+str(loss)+". -- accuracy: "+str(accuracy)) logger.info("FL Client model intercept: "+str(model.intercept_)) logger.info("FL Client model coefficients: "+str(model.coef_)) self.model_save(self.model) return loss, len(self.X_test), {"accuracy": accuracy} elif (self.problem_type.lower() == 'regression'): if (self.model_name.lower() == 'linearregression' ): # loss = log_loss(self.y_test, self.model.predict(self.X_test)) mse=mean_squared_error(self.y_test, self.model.predict(self.X_test)) rmse = np.sqrt(mean_squared_error(self.y_test, self.model.predict(self.X_test))) mae = mean_absolute_error(self.y_test, self.model.predict(self.X_test)) r2=r2_score(self.y_test, self.model.predict(self.X_test)) loss = rmse results = { "mean_absolute_error": mae, "mean_squared_error": mse, "root_mean_squared_error": rmse, "r2":r2, } print(f"{self.client_id} Sending weights -- data processed {self.num_records}, -- Loss: {(rmse)}. -- metrics: {results}. ") logger.info(str(self.client_id)+" Sending weights -- data processed "+str(self.num_records)+".-- Loss: "+str(rmse)+". -- metrics: "+str(results)) logger.info("FL Client model intercept: "+str(self.model.intercept_)) logger.info("FL Client model coefficients: "+str(self.model.coef_)) self.model_save(self.model) return loss, len(self.X_test), results """ The below aion fl client is for deep learning process. Why different client for sklearn and deeplearn ?: Because, flower calling the client object and process all functions (get_parameters,fit and evaluate) internally. So, user space we cannot combine both (sklearn n dl) using if..else. """ class aionflc_dl(flower.client.NumPyClient): def __init__(self,model,num_rounds,model_name,version,wait_time,client_id,num_records,model_overwrite,problem_type,X_train, X_test, y_train, y_test,model_params): self.count=0 self.num_rounds=round(num_rounds) self.model_name=model_name self.version=version self.wait_time=int(wait_time) self.client_id=client_id self.num_records=num_records self.model_overwrite=model_overwrite self.model=model self.problem_type=problem_type self.X_train, self.X_test, self.y_train, self.y_test=X_train, X_test, y_train, y_test self.model_params=model_params # """ The below part not used now. In future, for our own grpc communication, this module will be used.Call this function where we want. 
Need to modify aiongrpcproto.proto according our requirement.""" # def callaiongrpcclient(self): # clientins = aiongrpcclient() # status=clientins.startgrpcclient() # return status #Save the final model def model_save(self,model): ##Locate standard model dir to save model cwd = os.path.abspath(os.path.dirname(__file__)) model_location=os.path.join(cwd, 'models') try: os.makedirs(model_location) except FileExistsError as fe: # here,model_location already exists pass model_name=self.model_name # version=self.version ## Saving model if (self.model_overwrite.lower() == 'false'): version=str(self.count) if (model_name.lower() == "deeplearning"): file_name=model_name+'_'+self.problem_type+'_'+version+".h5" saved_model=os.path.normpath(os.path.join(model_location,file_name)) log(INFO, "flclient saved_model path: %s ",str(saved_model)) try: model.save(saved_model) return True except Exception as e: logger.info("model save error. Err.Msg: "+str(e)) return False else: file_name=model_name+'_'+self.problem_type+'_'+version+".sav" saved_model=os.path.normpath(os.path.join(model_location,file_name)) log(INFO, "flclient saved_model path: %s ",str(saved_model)) try: with open (saved_model,'wb') as f: pkl.dump(model,f) return True except Exception as e: logger.info("model save error. Err.Msg: "+str(e)) return False elif (self.model_overwrite.lower() == 'true'): version=str(self.version) if (model_name.lower() == "deeplearning"): file_name=model_name+'_'+self.problem_type+'_'+version+".h5" saved_model=os.path.normpath(os.path.join(model_location,file_name)) log(INFO, "flclient saved_model path: %s ",str(saved_model)) try: model.save(saved_model) return True except Exception as e: logger.info("model save error. Err.Msg: "+str(e)) return False else: file_name=model_name+'_'+self.problem_type+'_'+version+".sav" saved_model=os.path.normpath(os.path.join(model_location,file_name)) log(INFO, "flclient saved_model path: %s ",str(saved_model)) try: with open (saved_model,'wb') as f: pkl.dump(model,f) return True except Exception as e: logger.info("model save error. 
Err.Msg: "+str(e)) return False else: ##Write own user instruction pass def get_parameters(self, config): """Get parameters of the local model.""" return self.model.get_weights() def get_properties(self,model,time_out): """Return the current client properties.""" client_info={'client_id':self.client_id} time_out=100 return client_info,model,time_out def fit(self, parameters, config): """Train parameters on the locally held training set.""" # Update local model parameters self.model.set_weights(parameters) num_partitions=(self.num_rounds) # num_partitions=round(5) xtrain=np.array_split(self.X_train, num_partitions)[self.count] ytrain=np.array_split(self.y_train, num_partitions)[self.count] # y_train = np_utils.to_categorical(y_train, num_classes) # y_test = np_utils.to_categorical(y_test, num_classes) # Get hyperparameters for this round batch_size: int = int(self.model_params["batch_size"]) epochs: int = int(self.model_params["epochs"]) # round: int = config["rnd"] # self.round_id = round log(INFO, "===========================") log(INFO, "Start training model on local client %s round %i", self.client_id, config['rnd']) time.sleep(self.wait_time) self.count+=1 # Train the model using hyperparameters from config history = self.model.fit( xtrain, ytrain, batch_size, epochs, shuffle=False, # validation_split=0.1, validation_data=(self.X_test, self.y_test), verbose=1 ) # Return updated model parameters and results parameters_prime = self.model.get_weights() num_examples_train = len(self.X_train) model_name = self.model_name problem_type = self.problem_type if model_name == "deeplearning": if problem_type == "classification": acc = self.model.history.history['val_accuracy'] log(INFO, "Validated accuracy at the end of current round of client %s : %.2f %%", self.client_id, acc[-1]*100) log(INFO, "Finished training model on local client %s", self.client_id) results = { "loss": history.history["loss"][0], "accuracy": history.history["accuracy"][0], "val_loss": history.history["val_loss"][0], "val_accuracy": history.history["val_accuracy"][0], } if problem_type == "regression": mean_absolute_error = history.history['mean_absolute_error'][0] mean_squared_error = history.history['mean_squared_error'][0] y_pred = self.model.predict(self.X_test) from sklearn import metrics root_mean_squared_error = np.sqrt(metrics.mean_squared_error(self.y_test, y_pred)) log(INFO, "Mean Absolute Error at the end of current round of client %s : %f", self.client_id, mean_absolute_error) log(INFO, "Mean Squared Error at the end of current round of client %s : %f", self.client_id, mean_squared_error) log(INFO, "Root Mean Squared Error at the end of current round of client %s : %f", self.client_id, root_mean_squared_error) log(INFO, "Finished training model on local client %s", self.client_id) results = { "mean_absolute_error": mean_absolute_error, "mean_squared_error": mean_squared_error, "root_mean_squared_error": root_mean_squared_error, } return parameters_prime, num_examples_train, results def evaluate(self, parameters, config): """Evaluate parameters on the locally held test set.""" # Update local model with global parameters self.model.set_weights(parameters) num_partitions=(self.num_rounds) # Get config values # batch_size: int = config["val_batch_size"] batch_size: int = int(self.model_params["batch_size"]) steps: int = np.ceil(len(self.X_test)/batch_size) num_examples_test = len(self.X_test) log(INFO, "Run for only %i steps", steps) # Evaluate global model parameters on the local test data and return results 
model_name = self.model_name problem_type = self.problem_type self.model_save(self.model) if model_name == "deeplearning": if problem_type == "classification": loss, accuracy = self.model.evaluate(self.X_test, self.y_test,verbose=0) log(INFO, "Client %s : Accuracy %.2f %%", self.client_id, accuracy*100) log(INFO, "Client %s : Loss %.4f ", self.client_id, loss) return loss, num_examples_test, {"accuracy": accuracy} if problem_type == "regression": loss, mean_absolute_error, mean_squared_error = self.model.evaluate(self.X_test, self.y_test, steps=steps,verbose=1) y_pred = self.model.predict(self.X_test) root_mean_squared_error = np.sqrt(metrics.mean_squared_error(self.y_test, y_pred)) log(INFO, "Client %s : mean_absolute_error %f ", self.client_id, mean_absolute_error) log(INFO, "Client %s : mean_squared_error %f ", self.client_id, mean_squared_error) log(INFO, "Client %s : root_mean_squared_error %f ", self.client_id, root_mean_squared_error) return loss, num_examples_test, {"mean_absolute_error": mean_absolute_error, "mean_squared_error": mean_squared_error, "root_mean_squared_error": root_mean_squared_error} def randclientid(s,c): c=string.ascii_uppercase + string.digits return ''.join(random.choice(c) for x in range(s)) ## Loading input data def dataLoad(jsonfile): with open(jsonfile, 'r') as file: data = json.load(file) server_ip=str(data["server_IP"]) server_port=str(data["server_port"]) model_name=str(data["model_name"]) problem_type=str(data["problem_type"]) data_location=str(data["data_location"]) # deploy_location=str(data["deploy_location"]) model_params=data["model_hyperparams"] train_size=int(data["train_size"]) model_version=str(data["version"]) selected_feature=data["selected_feature"] if (type(selected_feature) is str): selected_feature=selected_feature.split(',') model_overwrite=data['model_overwrite'] target_feature=data["target_feature"] num_records=int(data['num_records_per_round']) wait_time=data['wait_time'] server_address=server_ip+':'+server_port # server_address=f"{server_ip}:{server_port}" return server_address,model_name,problem_type,data_location,model_params,model_version,selected_feature,target_feature,train_size,num_records,wait_time,model_overwrite # def getfilepath() """ Main aion federated learning client function call. """ if __name__ == "__main__": ##Client random id gen. rand_id=randclientid(9, "ABC1234567890") client_id='flclient-'+str(rand_id) try: json_file=sys.argv[1] except Exception as e: # sys.stdout.write("Please provide input configuration file. example: < python.exe 'fedclient\aionflc.py' 'fedclient\config.json' > ") log(INFO, "Please provide input configuration file. example: <python.exe 'fedclient\aionflc.py' 'fedclient\config.json'> \n") server_address,model_name,problem_type,data_location,model_params,model_version,selected_feature,target_feature,train_size,num_records,wait_time,model_overwrite = dataLoad(json_file) file_name=model_name+'_'+model_version+".log" cwd = os.path.abspath(os.path.dirname(__file__)) log_location = os.path.join(cwd, 'logs') try: os.makedirs(log_location) except FileExistsError as fe: # here,log_location already exists pass try: logobj = logging.getLogger('AION') fl_log=os.path.normpath(os.path.join(log_location,file_name)) log(INFO, "flclient log file path: %s ",str(fl_log)) logging.basicConfig(filename=fl_log,format='%(asctime)s %(message)s', filemode='w',level=logging.DEBUG) except Exception as e: log(INFO, "logging error. 
Error Msg: %s ",str(e)) pass ## default data location ~data\inputfile.csv data_location = os.path.normpath(os.path.join(cwd, data_location)) df = pd.read_csv(data_location) df =df[~df.isin([np.nan, np.inf, -np.inf]).any(axis=1)] df=df.reset_index(drop=True) y=df[target_feature] # X = df.drop(target_feature, axis=1) # # print("selected_feature: \n",selected_feature) X=df[selected_feature] input_shape = X.shape[1] # len(selected_feature) output_shape = len(y.value_counts()) test_size=(100-train_size)/100 X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size) no_classes = len(df.groupby(target_feature).count()) no_features=len(selected_feature) ## Pass the train data. (X_train, y_train) = utils.partition(X_train, y_train, 1)[0] scaler = StandardScaler() X_train_scaled = scaler.fit_transform(X_train) X_test_scaled = scaler.transform(X_test) # y_train = pd.get_dummies(y_train) # y_test = pd.get_dummies(y_test) y_train_dl = pd.get_dummies(y_train, sparse=True) y_test_dl = pd.get_dummies(y_test, sparse=True) if (problem_type.lower() == "classification"): if (model_name.lower() == "logisticregression"): #n_classes = df[target_feature].nunique() no_classes = len(df.groupby(target_feature).count()) no_features=len(selected_feature) logger.info("no_classes: "+str(no_classes)) logger.info("no_features: "+str(no_features)) modelName="logisticregression" model = None model = LogisticRegression(**model_params, warm_start=True) try: status=utils.setmodelName(model_name) utils.set_initial_params(model,no_classes,no_features) except Exception as e: print("util error: \n",e) num_rounds=round(len(df)/num_records) log(INFO, "Federated learning Client connecting to Server @: %s ",str(server_address)) try: flower.client.start_numpy_client(server_address=server_address, client=aionflc(model,num_rounds,model_name,model_version,wait_time,client_id,num_records,model_overwrite,problem_type,X_train_scaled, X_test_scaled, y_train, y_test)) except Exception as e: logger.info("AION FL Client instance error: \n"+str(e)) log(INFO, "AION federated learning Client %s execution completed.",str(client_id)) elif (model_name.lower() == "deeplearning"): optimizer = model_params["optimizer"] loss_func = model_params["losses"] act_func = model_params["activation"] last_act_func = model_params["last_activation"] input_shape = X.shape[1] # len(selected_feature) output_shape = len(y.value_counts()) print(f"input_shape:{input_shape}, output_shape:{output_shape}.") model = None if output_shape == 2: if last_act_func == "sigmoid" and loss_func == "binary_crossentropy": model = dl_model.dl_binary_classification(input_shape, output_shape, optimizer, loss_func, act_func, last_act_func) elif last_act_func == "softmax" and loss_func == "categorical_crossentropy": model = dl_model.dl_binary_classification(input_shape, output_shape, optimizer, loss_func, act_func, last_act_func) else: model = dl_model.dl_multiClass_classification(input_shape, output_shape, optimizer, loss_func, act_func, last_act_func) print(model.summary()) # status=utils.setmodelName(modelName) # utils.set_initial_params(model,no_classes,no_features) num_rounds=round(len(df)/num_records) log(INFO, "Federated learning Client connecting to Server @: %s ",str(server_address)) try: flower.client.start_numpy_client(server_address=server_address, client=aionflc_dl(model,num_rounds,model_name,model_version,wait_time,client_id,num_records,model_overwrite,problem_type,X_train_scaled, X_test_scaled, y_train_dl, y_test_dl,model_params)) except Exception as e: 
logger.info("AION FL Client instance error: \n"+str(e)) log(INFO, "AION federated learning Client %s execution completed.",str(client_id)) logger.info("AION federated learning Client execution completed."+str(client_id)) elif(problem_type.lower() == "regression"): if (model_name.lower() == "linearregression"): # model=LinearRegression(**model_params,warm_start=True) if model_params['fit_intercept'] == 'True': model_params['fit_intercept'] = True else: model_params['fit_intercept'] = False if model_params['copy_X'] == 'True': model_params['copy_X'] = True else: model_params['copy_X'] = False if model_params['positive'] == 'True': model_params['positive'] = True else: model_params['positive'] = False model=LinearRegression(**model_params) status=utils.setmodelName(model_name) utils.set_initial_params_reg(model,X_train.shape[0],X_train.shape[1]) num_rounds=round(len(df)/num_records) log(INFO, "Federated learning Client connecting to Server @: %s ",str(server_address)) try: flower.client.start_numpy_client(server_address=server_address, client=aionflc(model,num_rounds,model_name,model_version,wait_time,client_id,num_records,model_overwrite,problem_type,X_train_scaled, X_test_scaled, y_train, y_test)) except Exception as e: logger.info("AION FL Client instance error: \n"+str(e)) log(INFO, "AION federated learning Client %s execution completed.",str(client_id)) elif(model_name.lower() == "deeplearning"): input_shape = X.shape[1] # len(selected_feature) output_shape = len(y.value_counts()) optimizer = model_params["optimizer"] loss_func = model_params["losses"] act_func = model_params["activation"] model = None model = dl_model.dl_regression_model(input_shape, 1, optimizer, loss_func, act_func) num_rounds=round(len(df)/num_records) log(INFO, "Federated learning Client connecting to Server @: %s ",str(server_address)) try: flower.client.start_numpy_client(server_address=server_address, client=aionflc_dl(model,num_rounds,model_name,model_version,wait_time,client_id,num_records,model_overwrite,problem_type,X_train_scaled, X_test_scaled, y_train, y_test,model_params)) except Exception as e: logger.info("AION FL Client instance error: \n"+str(e)) log(INFO, "AION federated learning Client %s execution completed.",str(client_id))
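dataLoad() above drives the whole client from a JSON configuration file passed on the command line. The snippet below writes an example file containing the keys that dataLoad() reads; every value is a placeholder, and the address, paths and hyperparameters are assumptions for illustration only.

    # Example client configuration matching the keys read by dataLoad().
    # All values below are placeholders.
    import json

    config = {
        "server_IP": "127.0.0.1",
        "server_port": "8080",
        "model_name": "logisticregression",
        "problem_type": "classification",
        "data_location": "data/inputfile.csv",
        "model_hyperparams": {"max_iter": 100},
        "train_size": 80,
        "version": "1",
        "selected_feature": "f1,f2,f3",
        "model_overwrite": "True",
        "target_feature": "label",
        "num_records_per_round": 100,
        "wait_time": 1
    }

    with open("config.json", "w") as f:
        json.dump(config, f, indent=2)

    # Launch with: python aionflc.py config.json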
utils.py
from typing import Tuple, Union, List
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from flwr.common.logger import log
from logging import INFO

XY = Tuple[np.ndarray, np.ndarray]
Dataset = Tuple[XY, XY]
LogRegParams = Union[XY, Tuple[np.ndarray]]
XYList = List[XY]

modelUsed = None
modelname = None


def setmodelName(modelselected):
    try:
        modelname = str(modelselected)
        print("setmodelName, given modelname:\n", modelname)
        if modelname.lower() == 'logisticregression':
            modelUsed = LogisticRegression()
            return True
        elif modelname.lower() == "linearregression":
            modelUsed = LinearRegression()
            return True
        elif modelname.lower() == "sgdclassifier":
            # from sklearn.linear_model import SGDClassifier
            modelUsed = SGDClassifier()
            return True
        elif modelname.lower() == "knn":
            modelUsed = KNeighborsClassifier()
            return True
        elif modelname.lower() == "decisiontreeclassifier":
            modelUsed = DecisionTreeClassifier()
            return True
        else:
            return False
    except Exception as e:
        log(INFO, "set fl model name fn issue: %s", e)


def get_model_parameters(model: modelUsed) -> LogRegParams:
    """Returns the parameters of a sklearn LogisticRegression model."""
    model_name = model.__class__.__name__
    if model.fit_intercept:
        params = (model.coef_, model.intercept_)
    else:
        params = (model.coef_,)
    return params


def set_model_params(model: modelUsed, params: LogRegParams) -> modelUsed:
    """Sets the parameters of a sklearn LogisticRegression model."""
    model.coef_ = params[0]
    model_name = model.__class__.__name__
    try:
        if model.fit_intercept:
            model.intercept_ = params[1]
    except Exception as e:
        log(INFO, "set_model_params fn issue: %s", e)
        pass
    return model


def set_initial_params_reg(model, no_vals, no_features):
    """Sets initial parameters as zeros.

    Required since model params are uninitialized until model.fit is called,
    but the server asks clients for initial parameters at launch. Refer to the
    sklearn.linear_model.LogisticRegression documentation for more information.
    """
    no_vals = no_vals
    n_features = no_features
    # model.classes_ = np.array([i for i in range(n_classes)])
    model.coef_ = np.zeros(n_features,)
    model_name = model.__class__.__name__
    try:
        if model.fit_intercept:
            # model.intercept_ = np.ones((no_vals,1))
            model.intercept_ = np.zeros((no_vals,))
    except Exception as e:
        log(INFO, "set_initial_params fn issue: %s", e)
        pass


def set_initial_params(model, no_classes, no_features):
    """Sets initial parameters as zeros.

    Required since model params are uninitialized until model.fit is called,
    but the server asks clients for initial parameters at launch. Refer to the
    sklearn.linear_model.LogisticRegression documentation for more information.
    """
    n_classes = no_classes
    n_features = no_features
    model.classes_ = np.array([i for i in range(n_classes)])
    model.coef_ = np.zeros((n_classes, n_features))
    model_name = model.__class__.__name__
    try:
        if model.fit_intercept:
            model.intercept_ = np.zeros((n_classes,))
    except Exception as e:
        log(INFO, "set_initial_params fn issue: %s", e)
        pass


def shuffle(X: np.ndarray, y: np.ndarray) -> XY:
    """Shuffle X and y."""
    rng = np.random.default_rng()
    idx = rng.permutation(len(X))
    return X[idx], y[idx]


def partition(X: np.ndarray, y: np.ndarray, num_partitions: int) -> XYList:
    """Split X and y into a number of partitions."""
    return list(
        zip(np.array_split(X, num_partitions), np.array_split(y, num_partitions))
    )
parameters.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' from hyperopt import fmin, tpe, hp, STATUS_OK, Trials import numpy as np import logging import sys import os class parametersDefine(): def __init__(self): self.paramDict = None self.log = logging.getLogger('eion') def getParamSpaceSize(self,paramDict): size=1 if(len(paramDict)==0): return 0 for keys in paramDict.keys(): size=size*len(paramDict[keys]) return size def paramDefine(self, paramSpace, method): paramDict = {} for j in list(paramSpace.keys()): inp = paramSpace[j] try: isLog = False isLin = False isRan = False isList = False isString = False try: # check if functions are given as input and reassign paramspace v = paramSpace[j] if 'logspace' in paramSpace[j]: paramSpace[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "") isLog = True elif 'linspace' in paramSpace[j]: paramSpace[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "") isLin = True elif 'range' in paramSpace[j]: paramSpace[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "") isRan = True elif 'list' in paramSpace[j]: paramSpace[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "") isList = True elif '[' and ']' in paramSpace[j]: paramSpace[j] = v.split('[')[1].split(']')[0].replace(" ", "") isList = True x = paramSpace[j].split(',') except Exception as e: if isinstance(paramSpace[j], (int, float)): paramSpace[j] = str(paramSpace[j]) x = [] x.append(paramSpace[j]) str_arg = paramSpace[j] # check if arguments are string try: test = eval(x[0]) except: isString = True if isString: paramDict.update({j: hp.choice(j, x)} if method == 'bayesopt' else {j: x}) else: res = eval(str_arg) if isLin: y = eval('np.linspace' + str(res)) paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))} if method == 'bayesopt' else {j: y}) elif isLog: y = eval('np.logspace' + str(res)) paramDict.update( {j: hp.uniform(j, 10 ** eval(x[0]), 10 ** eval(x[1]))} if method == 'bayesopt' else {j: y}) elif isRan: y = eval('np.arange' + str(res)) paramDict.update({j: hp.choice(j, y)} if method == 'bayesopt' else {j: y}) # check datatype of argument elif isinstance(eval(x[0]), bool): y = list(map(lambda i: eval(i), x)) paramDict.update({j: hp.choice(j, eval(str(y)))} if method == 'bayesopt' else {j: y}) elif isinstance(eval(x[0]), float): res = eval(str_arg) if len(str_arg.split(',')) == 3 and not isList: y = eval('np.linspace' + str(res)) #print(y) paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))} if method == 'bayesopt' else {j: y}) else: y = list(res) if isinstance(res, tuple) else [res] paramDict.update({j: hp.choice(j, y)} if method == 'bayesopt' else {j: y}) else: res = eval(str_arg) if len(str_arg.split(',')) == 3 and not isList: y = eval('np.linspace' +str(res)) if eval(x[2]) >= eval(x[1]) else eval('np.arange'+str(res)) else: y = list(res) if isinstance(res, tuple) else [res] paramDict.update({j: hp.choice(j, y)} if method == 'bayesopt' else {j: y}) except Exception as inst: self.log.info('\n-----> Parameter parsing failed!!!.' 
+ str(inst)) self.log.info("The entered parameter is invalid: {"+ j +':'+ inp+'}') exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) raise return paramDict
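
# Usage sketch: paramDefine() converts the string-valued search space coming
# from the AION configuration into concrete candidate values; any method other
# than 'bayesopt' yields plain lists/arrays (for grid or random search), while
# 'bayesopt' would yield hyperopt spaces instead. The parameter names, values
# and the 'grid' method label below are assumptions for illustration only.
if __name__ == "__main__":
    pdef = parametersDefine()
    space = {"C": "[0.1,1.0,10.0]",            # bracketed list -> list of floats
             "max_iter": "range(100,400,100)", # range(...) -> np.arange values
             "penalty": "l2"}                  # plain string -> single choice
    grid_space = pdef.paramDefine(space, method="grid")
    # Size of the grid = product of the number of candidate values per key.
    print(pdef.getParamSpaceSize(grid_space))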
machinelearning.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import warnings warnings.filterwarnings('ignore') import logging import sklearn from sklearn.neighbors import NearestNeighbors from sklearn.cluster import KMeans from sklearn.cluster import DBSCAN from random import sample from numpy.random import uniform import numpy as np import math import pickle import os from math import isnan from sklearn.preprocessing import binarize from sklearn.preprocessing import LabelEncoder from sklearn.metrics import davies_bouldin_score from utils.file_ops import save_csv_compressed from sklearn.metrics import silhouette_score try: from sklearn.metrics import calinski_harabasz_score as calinski_harabaz_score except: from sklearn.metrics import calinski_harabaz_score import pandas as pd from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from sklearn.metrics import roc_curve, auc from sklearn.metrics import roc_auc_score from sklearn.metrics import matthews_corrcoef from sklearn.metrics import brier_score_loss from sklearn.preprocessing import LabelBinarizer from sklearn.model_selection import train_test_split from sklearn.decomposition import LatentDirichletAllocation from learner.classificationModel import ClassifierModel from learner.regressionModel import RegressionModel from sklearn.metrics import r2_score from sklearn.metrics import mean_absolute_error,make_scorer from sklearn.metrics import mean_squared_error from sklearn.metrics import RocCurveDisplay, auc, roc_curve import matplotlib.pyplot as plt #print("1") #from alibi.explainers import ALE,plot_ale #pd.set_option('display.max_columns', 10) #pd.set_option('display.width', None) def get_prediction( model, loaded_model, xtrain, xtest=None): train_prob = None test_prob = None predictedData = [] if xtest.empty: is_xtest = False else: is_xtest = True if model.lower() == 'lda': if is_xtest: predictedData = loaded_model.transform(xtest).argmax(axis=1) trainPredictedData = loaded_model.transform(xtrain) elif model.lower() == 'dbscan': if is_xtest: predictedData = loaded_model.fit_predict(xtest) predictedData = loaded_model.labels_ trainPredictedData = loaded_model.fit_predict(xtrain) trainPredictedData = loaded_model.labels_ elif model == 'Neural Architecture Search': train_prob = estimator.predict(xtrain) if train_prob.shape[1] == 1: train_prob = np.hstack(( 1-train_prob, train_prob)) trainPredictedData = np.argmax(train_prob, axis=1) if is_xtest: test_prob = estimator.predict(xtest) if test_prob.shape[1] == 1: test_prob = np.hstack(( 1-test_prob, test_prob)) predictedData = np.argmax(test_prob, axis=1) elif model in ['Deep Q Network','Dueling Deep Q Network']: from tf_agents.trajectories import time_step from tensorflow import constant q, _ = loaded_model(np.array(xtrain), step_type=constant([time_step.StepType.FIRST] * np.array(xtrain).shape[0]), training=False) train_prob = q.numpy() if train_prob.shape[1] == 1: train_prob = np.hstack(( 1-train_prob, train_prob)) trainPredictedData = np.argmax(train_prob, axis=1) predictedData = 
np.argmax(test_prob, axis=1) if is_xtest: q,_ = loaded_model(np.array(xtest), step_type=constant([time_step.StepType.FIRST] * np.array(xtest).shape[0]), training=False) test_prob = q.numpy() if test_prob.shape[1] == 1: test_prob = np.hstack(( 1-test_prob, test_prob)) predictedData = np.argmax(test_prob, axis=1) else: if is_xtest: predictedData = loaded_model.predict(xtest) trainPredictedData = loaded_model.predict(xtrain) if hasattr(loaded_model, 'predict_proba'): train_prob = loaded_model.predict_proba(xtrain) if is_xtest: test_prob = loaded_model.predict_proba(xtest) return trainPredictedData, predictedData, train_prob, test_prob class machinelearning(object): def __init__(self): self.features=[] self.log = logging.getLogger('eion') self.plots = [] def cluster_tendency(self,featureData): self.log.info("\n------------- Cluster Tendency Check -------------") d = featureData.shape[1] n = len(featureData) m = int(0.1 * n) nbrs = NearestNeighbors(n_neighbors=1).fit(featureData.values) rand_X = sample(range(0, n, 1), m) ujd = [] wjd = [] for j in range(0, m): u_dist, _ = nbrs.kneighbors(uniform(np.amin(featureData,axis=0),np.amax(featureData,axis=0),d).reshape(1, -1), 2, return_distance=True) ujd.append(u_dist[0][1]) if isinstance(featureData.iloc[rand_X[j]].values, pd.core.arrays.sparse.array.SparseArray): featureData_reshaped = np.asarray(featureData.iloc[rand_X[j]].values).reshape(1, -1) else: featureData_reshaped = featureData.iloc[rand_X[j]].values.reshape(1, -1) w_dist, _ = nbrs.kneighbors(featureData_reshaped, 2, return_distance=True) wjd.append(w_dist[0][1]) try: clusetTendency = sum(ujd) / (sum(ujd) + sum(wjd)) except: clusetTendency = 0 if isnan(clusetTendency): clusetTendency = 0 self.log.info("-------> Cluster Tendency value using Hopkins Statistic: "+str(clusetTendency)) self.log.info("------------- Cluster Tendency Check End-------------\n") return (clusetTendency) def calculateNumberofCluster(self,featureData): self.log.info("\n------------- Calculate Number of Cluster -------------") Sum_of_squared_distances = [] K = range(1,15) for k in K: km = KMeans(n_clusters=k) km = km.fit(featureData) Sum_of_squared_distances.append(km.inertia_) x1, y1 = 1, Sum_of_squared_distances[0] x2, y2 = 15, Sum_of_squared_distances[len(Sum_of_squared_distances)-1] distances = [] for inertia in range(len(Sum_of_squared_distances)): x0 = inertia+2 y0 = Sum_of_squared_distances[inertia] numerator = abs((y2-y1)*x0 - (x2-x1)*y0 + x2*y1 - y2*x1) denominator = math.sqrt((y2 - y1)**2 + (x2 - x1)**2) distances.append(numerator/denominator) n_clusters=distances.index(max(distances)) + 2 self.log.info("-------> n_clusters: "+str(n_clusters-1)) self.log.info("------------- Calculate Number of Cluster End-------------\n") return(n_clusters-1) def getclusterMatrix(self,featureData,targetData): silhouetteAvg = silhouette_score(featureData,targetData) self.log.info("-------> SilHouette_Avg: "+str(silhouetteAvg)) daviesBouldinScore=davies_bouldin_score(featureData, targetData) self.log.info("-------> DaviesBouldinScore: "+str(daviesBouldinScore)) calinskiHarabazScore=calinski_harabaz_score(featureData,targetData) self.log.info("-------> CalinskiHarabazScore: "+str(calinskiHarabazScore)) matrix = '"SilHouette_Avg":'+str(silhouetteAvg)+',"DaviesBouldinScore":'+str(daviesBouldinScore)+',"CalinskiHarabazScore":'+str(calinskiHarabazScore) return(matrix) def get_regression_matrix(self,targetData,predictedData): try: r2score=r2_score(targetData, predictedData) self.log.info('-------> R2_score :'+str(r2score)) except 
Exception as e: self.log.info('\n--------- r2_score ',str(e)) r2score = 0 try: meanabsoluteerror=(mean_absolute_error(targetData, predictedData)) self.log.info('-------> MAE :'+str(meanabsoluteerror)) except Exception as e: self.log.info('\n---------Error: meanabsoluteerror ',str(e)) meanabsoluteerror = 0 try: meanssquatederror=mean_squared_error(targetData, predictedData) self.log.info('-------> MSE :'+str(meanssquatederror)) except Exception as e: self.log.info('\n---------Error: meanssquatederror ',str(e)) meanssquatederror = 0 try: rootmeanssquatederror=mean_squared_error(targetData, predictedData,squared=False) self.log.info('-------> RMSE :'+str(rootmeanssquatederror)) except Exception as e: self.log.info('\n---------Error: rootmeanssquatederror ',str(e)) rootmeanssquatederror = 0 try: normalised_rmse_percentage = (rootmeanssquatederror/ ( np.max(targetData) - np.min(targetData) )) * 100 self.log.info('-------> Normalised RMSE percentage :'+str(normalised_rmse_percentage)) except Exception as e: self.log.info('\n---------Error: Normalised RMSE percentage ',str(e)) normalised_rmse_percentage = -1 try: targetArray, predictedArray = np.array(targetData), np.array(predictedData) try: EPSILON = 1e-10 meanpercentageerror=np.mean(np.abs((targetArray - predictedArray) / (targetArray+EPSILON)))*100 except ZeroDivisionError: meanpercentageerror = 0 self.log.info('-------> MAPE :'+str(meanpercentageerror)) except Exception as e: self.log.info('\n---------Error: meanpercentageerror ',str(e)) meanpercentageerror = 0 matrix = '"MAE":'+str(round(meanabsoluteerror,2))+',"R2Score":'+str(round(r2score,2))+',"MSE":'+str(round(meanssquatederror,2))+',"MAPE":'+str(round(meanpercentageerror,2))+',"RMSE":'+str(round(rootmeanssquatederror,2))+',"Normalised RMSE(%)":'+str(round(normalised_rmse_percentage,2)) return matrix def getClassificationPerformaceMatrix(self,le_trainY,predictedData,prob,labelMaps): setOfyTrue = set(le_trainY) unqClassLst = list(setOfyTrue) if len(unqClassLst) <= 20: if str(labelMaps) != '{}': inv_mapping_dict = {v: k for k, v in labelMaps.items()} unqClassLst2 = (pd.Series(unqClassLst)).map(inv_mapping_dict) unqClassLst2 = list(unqClassLst2) else: unqClassLst2 = unqClassLst indexName = [] columnName = [] targetnames=[] for item in unqClassLst2: indexName.append("act:"+str(item)) columnName.append("pre:"+str(item)) targetnames.append(str(item)) matrixconfusion = pd.DataFrame(confusion_matrix(le_trainY,predictedData, labels = unqClassLst),index = indexName, columns = columnName) pd.set_option('display.max_columns',len(targetnames)+2) self.log.info('-------> Confusion Matrix: ') self.log.info(matrixconfusion) pd.reset_option('display.max_columns') classificationreport = pd.DataFrame(classification_report(le_trainY, predictedData, labels = unqClassLst,target_names=targetnames,output_dict=True)).transpose() self.log.info('-------> Classification Report: ') self.log.info(classificationreport) matrixconfusion = matrixconfusion.to_json(orient='index') classificationreport = classificationreport.to_json(orient='index') else: #bugid: 14540 self.log.info('-------> As the number of class is more than 20, skipping the creation of confusion_matrix and classification Report') return "" lb = LabelBinarizer() lb.fit(le_trainY) transformTarget= lb.transform(le_trainY) if transformTarget.shape[-1] == 1: transformTarget = le_trainY prob = np.delete( prob, 0, 1) rocaucscore = roc_auc_score(transformTarget,prob,average="macro") brier_score = None mcc_score = matthews_corrcoef(le_trainY,predictedData) if 
len(unqClassLst) > 2: brier_score = np.mean(np.sum(np.square(prob - transformTarget), axis=1)) else: brier_score = brier_score_loss(transformTarget,prob) self.log.info('-------> ROC AUC SCORE :'+str(rocaucscore)) self.log.info(f'-------> Matthews correlation coefficient SCORE : {mcc_score}') self.log.info(f'-------> BRIER SCORE : {brier_score}') matrix = f'"ConfusionMatrix": {matrixconfusion},"ClassificationReport": {classificationreport},"ROC_AUC_SCORE": {rocaucscore},"MCC_SCORE": {mcc_score},"BRIER_SCORE": {brier_score}' return(matrix) def split_into_train_test_data(self,featureData,targetData,testPercentage,modelType='classification'): ''' if cvSplit == None: ''' self.log.info('\n-------------- Test Train Split ----------------') if testPercentage == 0: xtrain=featureData ytrain=targetData xtest=featureData ytest=targetData else: testSize=testPercentage/100 if modelType == 'regression': self.log.info('-------> Split Type: Random Split') xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True) else: try: self.log.info('-------> Split Type: Stratify Split') xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,stratify=targetData,test_size=testSize,shuffle=True) except: self.log.info('-------> Split Type: Random Split') xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True) self.log.info('Status:- !... Train / test split done: '+str(100-testPercentage)+'% train,'+str(testPercentage)+'% test') self.log.info('-------> Train Data Shape: '+str(xtrain.shape)+' ---------->') self.log.info('-------> Test Data Shape: '+str(xtest.shape)+' ---------->') self.log.info('-------------- Test Train Split End ----------------\n') ''' else: xtrain=featureData ytrain=targetData xtest=featureData ytest=targetData ''' return(xtrain,ytrain,xtest,ytest) def checkForClassBalancing(self,targetData): imbalancedCount=0 valueCount=targetData.value_counts() self.log.info("---------- Checking for Class Imbalance on Train Data---------") self.log.info("-------> Categories and Count:") self.log.info(valueCount) categoryList=valueCount.keys().tolist() categoryCountList=valueCount.tolist() for i in range(0,len(categoryCountList)): if float(categoryCountList[i])<=float(0.5*max(categoryCountList)): self.log.info("-------> Found Imbalanced class: '"+str(categoryList[i])+"' Count: "+str(categoryCountList[i])) imbalancedCount=imbalancedCount+1 if imbalancedCount == 0: self.log.info("-------> Status: Balanced") self.log.info('Status:- |... Check for Data balancing done: Balanced') else: self.log.info("-------> Status: Unbalanced") self.log.info('Status:- |... Check for Data balancing done: Unbalanced') self.log.info("---------- Checking for Class Imbalance on Train Data End---------") return(imbalancedCount) def ExecuteClassBalancing(self,featureData,targetData,balancingMethod): from imblearn.over_sampling import SMOTE from imblearn.under_sampling import TomekLinks from collections import Counter self.log.info('\n------------ Balancing Start --------------') if balancingMethod.lower() == "oversample": self.log.info("-------> Method: SMOTE OverSampling Technique") k=1 seed=100 try: oversample = SMOTE(sampling_strategy='auto', k_neighbors=k, random_state=seed) balfeatureData, baltargetData = oversample.fit_resample(featureData, targetData) self.log.info(baltargetData.value_counts()) except Exception as inst: self.log.info("\n!!!!!!!!! 
OverSampling Fails "+str(inst)+" !!!!!!!!!!!!!!\n") balfeatureData = featureData baltargetData = targetData elif balancingMethod.lower() == "undersample": self.log.info("-------> Method: Tomelinks UnderSampling Technique") tLinks = TomekLinks() balfeatureData, baltargetData= tLinks.fit_resample(featureData, targetData) #Added for checking balancing act by the algorithm. counter = Counter(baltargetData) self.log.info("Class counter:\t"+str(baltargetData.value_counts())) max_class = max(counter,key=counter.get) max_value = max(counter.values()) self.log.info("Max samples: "+str(max_value)+ " in the class: "+str(max_class)) for k,v in counter.items(): if v < (max_value*98/100): self.log.info("Undersampling is not able to do perfect data balancing.") self.log.info("The method is used to identify the desired samples of data from the majority class that is having the lowest Euclidean distance with the minority class data. Downsampling may not balance the class after applying this method.\n") self.log.info(baltargetData.value_counts()) else: balfeatureData = featureData baltargetData = targetData self.log.info("-------> Method: Balancing Not Applied") self.log.info('-------> Memory Usage by Training DataFrame After Class Balancing '+str(featureData.memory_usage(deep=True).sum())) self.log.info('Status:- |... Data balancing done: '+str(balancingMethod)) self.log.info('------------ Balancing End --------------\n') return(balfeatureData,baltargetData) def combine_text_features(self,dataFrame,dataColumns): column_merge_flag = False merge_columns = [] if(len(dataColumns) > 1): dataFrame['combined'] = dataFrame[dataColumns].apply(lambda row: ' '.join(row.values.astype(str)), axis=1) merge_columns = dataColumns features = ['combined'] column_merge_flag = True self.log.info("After Text Concatenation") self.log.info(dataFrame['combined'].head(10)) self.log.info("List of Combined Columns ---> "+ str(dataColumns) +"\n") else: features = dataColumns return(dataFrame,features,column_merge_flag,merge_columns) ''' def create_preprocessing_pipeline(self,X): textDataProfilerObj=textDataProfiler() tfidfVector = TfidfVectorizer(tokenizer = textDataProfilerObj.textTokenizer) pipe = Pipeline([("cleaner", TextCleaner()),('vectorizer', tfidfVector)]) vectors=pipe.fit(X) transformedVector=pipe.transform(X) return(pipe,transformedVector) ''' def get_topics(self, model, feature_names, no_top_words): topicDict = {} for topic_idx, topic in enumerate(model.components_): wordDict = {} topicProb = [(feature_names[i],topic[i]/topic.sum()) for i in topic.argsort()[:-no_top_words - 1:-1]] for word, prob in topicProb: if word.endswith('_vect'): word = word[:-len('_vect')] wordDict[word] = prob topicDict[ topic_idx] = wordDict return topicDict def transform_target_feature(self,dataFrame,targetColumn): targetDataType=dataFrame[targetColumn].dtypes pandasNumericDtypes=['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] labelMapping= {} if targetDataType not in pandasNumericDtypes: le = LabelEncoder() le.fit(dataFrame[targetColumn]) le_trainY = le.transform(dataFrame[targetColumn]) labelMapping = dict(zip(le.classes_, le.transform(le.classes_))) self.log.info(" \n encoded Values of predicator column ===>"+str(labelMapping)) else: le_trainY = dataFrame[targetColumn] return le_trainY,labelMapping def setScoreParams(self,scoreParam,modelType,categoryCountList): if modelType == 'classification' or modelType == 'TextClassification': allowedmatrix = ['accuracy','recall','f1_score','precision','roc_auc'] if(scoreParam.lower() not in 
allowedmatrix): scoreParam = 'accuracy' elif scoreParam.lower() == 'none': scoreParam = 'accuracy' elif scoreParam.lower() == "recall": if len(categoryCountList) > 2: scoreParam = make_scorer(sklearn.metrics.recall_score, average = 'weighted') else: scoreParam = make_scorer(sklearn.metrics.recall_score) elif scoreParam.lower() == "precision" : if len(categoryCountList) > 2: scoreParam = make_scorer(sklearn.metrics.precision_score, average = 'weighted') else: scoreParam = make_scorer(sklearn.metrics.precision_score) elif scoreParam.lower() == "f1_score" : if len(categoryCountList) > 2: scoreParam = make_scorer(sklearn.metrics.f1_score, average = 'weighted') else: scoreParam = make_scorer(sklearn.metrics.f1_score) elif scoreParam.lower() == "roc_auc" : if len(categoryCountList) > 2: scoreParam = make_scorer(sklearn.metrics.roc_auc_score,needs_proba=True,multi_class='ovr',average='weighted') else: scoreParam = make_scorer(sklearn.metrics.roc_auc_score) else: scoreParam = scoreParam else: allowedmatrix = ['mse','r2','rmse','mae'] if(scoreParam.lower() not in allowedmatrix): scoreParam = 'neg_mean_squared_error' elif scoreParam.lower() == 'none': scoreParam = 'neg_mean_squared_error' elif scoreParam.lower() == 'mse': scoreParam = 'neg_mean_squared_error' elif scoreParam.lower() == 'rmse': #scoreParam = make_scorer(sklearn.metrics.mean_squared_error, squared = False) scoreParam='neg_root_mean_squared_error' elif scoreParam.lower() == 'mae': scoreParam = 'neg_mean_absolute_error' elif scoreParam.lower() == 'r2': scoreParam = 'r2' else: scoreParam = scoreParam #self.log.info('Status:- !... Scoring parameters selected') self.log.info("-------> Scoring parameter: "+str(scoreParam)) return(scoreParam) def getbestfeatureModel(self,modelType,scoreParam,score1,score2,model1,model2,threshold1,pscore1,rscore1,threshold2,pscore2,rscore2,featuresset1,featureset2): best_feature_model = featuresset1 self.log.info('\n ---------- ML Summary ------------') if modelType.lower() == "classification": if(threshold1 == -1 and threshold2 == -1): if score1> score2: self.log.info('-------> Best Features: '+str(featuresset1)) self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = featuresset1 else: self.log.info('-------> Best Features:'+str(featureset2)) self.log.info('-------> Best Model: '+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = featureset2 elif(threshold1 == -1): self.log.info('-------> Best Features: '+str(featureset2)) self.log.info('-------> Best Model: '+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = featureset2 elif(threshold1 == -2): self.log.info('-------> Best Features: '+str(featuresset1)) self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model =featuresset1 else: if pscore1 == pscore2: if rscore1 > rscore2: self.log.info('-------> Best Features: '+str(featuresset1)) self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = featuresset1 else: self.log.info('-------> Best Features: '+str(featureset2)) self.log.info('-------> Best Model: 
'+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = featureset2 elif rscore1 == rscore2: if pscore1 > pscore2: self.log.info('-------> Best Features: '+str(featuresset1)) self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = featuresset1 else: self.log.info('-------> Best Features: '+str(featureset2)) self.log.info('-------> Best Model: '+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = featureset2 elif modelType.lower() == "regression": if scoreParam == "r2" or scoreParam == "explained_variance": if score1> score2 : self.log.info('-------> Best Features: '+str(featuresset1)) self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = featuresset1 else: self.log.info('-------> Best Features: '+str(featureset2)) self.log.info('-------> Best Model: '+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = featureset2 else: if score1< score2 : self.log.info('-------> Best Features: '+str(featuresset1)) self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = featuresset1 else: self.log.info('-------> Best Features: '+str(featureset2)) self.log.info('-------> Best Model: '+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = featureset2 self.log.info('---------- ML Summary End ------------\n') return(best_feature_model) def startLearning(self,mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,topFeatures,modelFeatures,allFeatures,targetType,deployLocation,iterName,iterVersion,trained_data_file,predicted_data_file,labelMaps,featuresBasedOn,code_configure,featureEngineeringSelector,modelEvaluationConfig,imageFolderLocation): model = 'None' params = 'None' score = 0xFFFF estimator = None model_tried = '' threshold = -1 pscore = -1 rscore = -1 topics = {} if(targetColumn != ''): targetData = dataFrame[targetColumn] datacolumns=list(dataFrame.columns) if targetColumn in datacolumns: datacolumns.remove(targetColumn) if(modelType != 'clustering') and (modelType != 'TopicModelling'): scoreParam = self.setScoreParams(scoreParam,modelType,categoryCountList) if len(topFeatures) > 0: self.log.info('\n-------------- Training ML: Top/StatisticalBased Features Start --------------') modelbasedon = 'StatisticalBased' if featureEngineeringSelector.lower() == 'true': self.log.info('Status:- |... Algorithm analysis based on feature engineering based feature selection started') modelbasedon = 'DimensionalityReduction' else: self.log.info('Status:- |... 
Algorithm analysis based on statistical based feature selection started') model_type1,model1,params1, score1, estimator1,model_tried1,xtrain1,ytrain1,xtest1,ytest1,threshold1,pscore1,rscore1,method,topics=self.startLearnerModule(mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,topFeatures,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps,featuresBasedOn, modelbasedon,code_configure,modelEvaluationConfig) if model_tried != '': model_tried += ',' model_tried += model_tried1 topFeaturesStatus = True if featureEngineeringSelector.lower() == 'true': self.log.info('Status:- |... Algorithm analysis based on feature engineering based feature selection completed') else: self.log.info('Status:- |... Algorithm analysis for statistical based feature completed') self.log.info('-------------- Training ML: Top/StatisticalBased Features End --------------\n') else: topFeaturesStatus = False if len(modelFeatures) > 0: self.log.info('\n-------------- Training ML: Models Based Selected Features Start --------------') self.log.info('Status:- |... Algorithm analysis based on model based feature selection started') model_type2,model2,params2, score2, estimator2,model_tried2,xtrain2,ytrain2,xtest2,ytest2,threshold2,pscore2,rscore2,method,topics=self.startLearnerModule(mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,modelFeatures,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps,featuresBasedOn, "ModelBased",code_configure,modelEvaluationConfig) #model_tried2['Features'] = 'ModelBased' if model_tried != '': model_tried += ',' model_tried += model_tried2 modelFeaturesStatus = True self.log.info('Status:- |... Algorithm analysis for model based selected features completed') self.log.info('-------------- Training ML: Models Based Selected Features End --------------\n') else: modelFeaturesStatus = False if len(allFeatures) > 0: self.log.info('Status:- |... Algorithm analysis based on all features Start') model_type3,model3,params3, score3, estimator3,model_tried3,xtrain3,ytrain3,xtest3,ytest3,threshold3,pscore3,rscore3,method,topics=self.startLearnerModule(mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,allFeatures,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps,featuresBasedOn, "AllFeatures",code_configure,modelEvaluationConfig) #model_tried3['Features'] = 'AllFeatures' allFeaturesStatus = True if model_tried != '': model_tried += ',' model_tried += model_tried3 self.log.info('Status:- |... 
Algorithm analysis based all features completed') else: allFeaturesStatus = False #print(topFeaturesStatus,modelFeaturesStatus,allFeaturesStatus) if topFeaturesStatus: if modelFeaturesStatus: best_feature_model = self.getbestfeatureModel(modelType,scoreParam,score1,score2,model1,model2,threshold1,pscore1,rscore1,threshold2,pscore2,rscore2,'StatisticalBased','ModelBased') if best_feature_model == 'StatisticalBased' and allFeaturesStatus: best_feature_model = self.getbestfeatureModel(modelType,scoreParam,score1,score3,model1,model3,threshold1,pscore1,rscore1,threshold3,pscore3,rscore3,'StatisticalBased','AllFeatures') if best_feature_model == 'ModelBased' and allFeaturesStatus: best_feature_model = self.getbestfeatureModel(modelType,scoreParam,score2,score3,model2,model3,threshold2,pscore2,rscore2,threshold3,pscore3,rscore3,'ModelBased','AllFeatures') elif allFeaturesStatus: best_feature_model = self.getbestfeatureModel(modelType,scoreParam,score1,score3,model1,model3,threshold1,pscore1,rscore1,threshold3,pscore3,rscore3,'StatisticalBased','AllFeatures') else: best_feature_model = 'StatisticalBased' if featureEngineeringSelector.lower() == 'true': best_feature_model = 'DimensionalityReduction' else: if modelFeaturesStatus and allFeaturesStatus: best_feature_model = self.getbestfeatureModel(modelType,scoreParam,score2,score3,model2,model3,threshold2,pscore2,rscore2,threshold3,pscore3,rscore3,'ModelBased','AllFeatures') elif modelFeaturesStatus: best_feature_model = 'ModelBased' elif allFeaturesStatus: best_feature_model = 'AllFeatures' if (best_feature_model == 'StatisticalBased' or best_feature_model == 'DimensionalityReduction'): model_type = model_type1 model = model1 params = params1 score = score1 estimator = estimator1 #model_tried = model_tried1 xtrain = xtrain1 ytrain = ytrain1 xtest = xtest1 ytest = ytest1 features = topFeatures threshold = threshold1 pscore = pscore1 rscore = rscore1 elif (best_feature_model == 'AllFeatures'): model_type = model_type3 model = model3 params = params3 score = score3 estimator = estimator3 #model_tried = model_tried3 xtrain = xtrain3 ytrain = ytrain3 xtest = xtest3 ytest = ytest3 features = allFeatures threshold = threshold3 pscore = pscore3 rscore = rscore3 else: model_type = model_type2 model = model2 params = params2 score = score2 estimator = estimator2 #model_tried = model_tried2 xtrain = xtrain2 ytrain = ytrain2 xtest = xtest2 ytest = ytest2 threshold = threshold2 pscore = pscore2 rscore = rscore2 features = modelFeatures if score != 'NA': self.log.info('Status:- |... 
Final Best Algorithm selected: '+model+' having score='+str(round(score,2))+' based on '+best_feature_model+' feature selection') filename = os.path.join(deployLocation,'model',iterName+'_'+iterVersion+'.sav') saved_model = iterName+'_'+iterVersion+'.sav' if model == 'Neural Architecture Search': loaded_model = estimator try: estimator.save(filename, save_format="tf") except Exception: filename = os.path.join(deployLocation,'model','autoKerasModel.h5') estimator.save(filename) saved_model = 'autoKerasModel.h5' else: pickle.dump(estimator, open(filename, 'wb')) loaded_model = pickle.load(open(filename, 'rb')) if not xtest.empty: df_test = xtest.copy() else: df_test = xtrain.copy() if threshold == -1: if model.lower() == 'lda': predictedData = loaded_model.transform(xtest).argmax(axis=1) trainPredictedData = loaded_model.transform(xtrain) elif model.lower() == 'dbscan': predictedData = loaded_model.fit_predict(xtest) predictedData = loaded_model.labels_ trainPredictedData = loaded_model.fit_predict(xtrain) trainPredictedData = loaded_model.labels_ elif model == 'Neural Architecture Search': test_prob = estimator.predict(xtest) train_prob = estimator.predict(xtrain) if train_prob.shape[1] == 1: train_prob = np.hstack(( 1-train_prob, train_prob)) test_prob = np.hstack(( 1-test_prob, test_prob)) predictedData = np.argmax(test_prob, axis=1) trainPredictedData = np.argmax(train_prob, axis=1) elif model in ['Deep Q Network','Dueling Deep Q Network']: from tf_agents.trajectories import time_step from tensorflow import constant from sklearn.preprocessing import MinMaxScaler q, _ = loaded_model(np.array(xtest), step_type=constant([time_step.StepType.FIRST] * np.array(xtest).shape[0]), training=False) test_prob = MinMaxScaler().fit_transform( q.numpy()) q, _ = loaded_model(np.array(xtrain), step_type=constant([time_step.StepType.FIRST] * np.array(xtrain).shape[0]), training=False) train_prob = MinMaxScaler().fit_transform( q.numpy()) predictedData = np.argmax(test_prob, axis=1) trainPredictedData = np.argmax(train_prob, axis=1) elif modelType == 'clustering': if not xtest.empty: predictedData = loaded_model.predict(xtest) trainPredictedData = loaded_model.predict(xtrain) else: if not xtest.empty: predictedData = loaded_model.predict(xtest) trainPredictedData = loaded_model.predict(xtrain) if hasattr(loaded_model, 'predict_proba'): train_prob = loaded_model.predict_proba(xtrain) if not xtest.empty: test_prob = loaded_model.predict_proba(xtest) else: self.log.info("-------> Threshold :"+str(threshold)) if not xtest.empty: #bug 12437 if 'predict_proba' in dir(loaded_model): test_prob = loaded_model.predict_proba(xtest) predictedData = binarize(test_prob[:,1].reshape(-1, 1),threshold=threshold) else: raise Exception('--------- Loaded model does not support predict_proba ---------\n') train_prob = loaded_model.predict_proba(xtrain) trainPredictedData = binarize(train_prob[:,1].reshape(-1, 1),threshold=threshold) matrix = '' try: if(model_type == 'Classification'): self.log.info('\n--------- Performance Matrix with Train Data ---------') train_matrix = self.getClassificationPerformaceMatrix(ytrain,trainPredictedData,train_prob,labelMaps) self.log.info('--------- Performance Matrix with Train Data End ---------\n') if not xtest.empty: self.log.info('\n--------- Performance Matrix with Test Data ---------') performancematrix = self.getClassificationPerformaceMatrix(ytest,predictedData,test_prob,labelMaps) df_test['actual'] = ytest df_test['predict'] = predictedData self.log.info('--------- Performance Matrix 
with Test Data End ---------\n') matrix = performancematrix if hasattr( loaded_model, 'predict_proba'): predictedData_fit = loaded_model.predict_proba(xtest) elif model == 'Neural Architecture Search': predictedData_fit = estimator.predict(xtest) elif model in ['Deep Q Network','Dueling Deep Q Network']: from tf_agents.trajectories import time_step from tensorflow import constant q, _ = loaded_model(np.array(xtest), step_type=constant([time_step.StepType.FIRST] * np.array(xtest).shape[0]), training=False) predictedData_fit = q.numpy() else: predictedData_fit = loaded_model.predict(xtest) if predictedData_fit.shape[1] == 1: predictedData_fit = np.hstack((1 - predictedData_fit, predictedData_fit)) self.auc_roccurve(ytest,predictedData_fit,labelMaps,imageFolderLocation) else: df_test['actual'] = ytrain df_test['predict'] = trainPredictedData elif(model_type == 'Regression'): self.log.info('\n--------- Performance Matrix with Train Data ---------') train_matrix = self.get_regression_matrix(ytrain, trainPredictedData) self.log.info('--------- Performance Matrix with Train Data End ---------\n') if not xtest.empty: self.log.info('\n--------- Performance Matrix with Test Data ---------') matrix = self.get_regression_matrix(ytest, predictedData) df_test['actual'] = ytest df_test['predict'] = predictedData self.log.info('--------- Performance Matrix with Test Data End ---------\n') else: df_test['actual'] = ytrain df_test['predict'] = trainPredictedData elif(model_type == 'Clustering'): self.log.info('\n--------- Performance Matrix with Train Data ---------') train_matrix = self.getclusterMatrix(xtrain,trainPredictedData) self.log.info('--------- Performance Matrix with Train Data End ---------\n') self.log.info('\n--------- Performance Matrix with Test Data ---------') performacematrix = self.getclusterMatrix(xtest,predictedData) df_test['predict'] = predictedData self.log.info('--------- Performance Matrix with Test Data End ---------\n') matrix = performacematrix elif(model_type.lower() == 'topicmodelling'): self.log.info('\n--------- Performance Matrix with Train Data ---------') train_matrix = "" self.log.info('--------- Performance Matrix with Train Data End ---------\n') self.log.info('\n--------- Performance Matrix with Test Data ---------') performacematrix = "" df_test['predict'] = predictedData self.log.info('--------- Performance Matrix with Test Data End ---------\n') matrix = performacematrix except Exception as Inst: self.log.info('--------- Error Performance Matrix ---------\n') self.log.info(str(Inst)) df_test['predict'] = predictedData matrix = "" train_matrix = "" self.log.info('--------- Performance Matrix with Test Data End ---------\n') save_csv_compressed(df_test, predicted_data_file, encoding='utf-8') return 'Success',model_type,model,saved_model,matrix,train_matrix,xtrain.shape,model_tried,score,filename,features,threshold,pscore,rscore,method,estimator,xtrain,ytrain,xtest,ytest,topics,params def auc_roccurve(self,y_true,y_score,classee,imageFolderLocation): from keras.utils import to_categorical from sklearn.preprocessing import label_binarize import re n_classes = len(classee) y_true = to_categorical(y_true,num_classes = n_classes) fpr ={} tpr={} roc_auc={} class_names = list(classee.keys()) typeofclass = list(classee.values()) n_class = len(typeofclass) for i in range(n_classes): fpr[i],tpr[i],_ = roc_curve(y_true[:,i], y_score[:,i]) roc_auc[i]= auc(fpr[i],tpr[i]) plt.figure() plt.plot(fpr[i],tpr[i],label=f'{class_names[i]} (AUC = {roc_auc[i]:.2f})') 
plt.plot([0,1],[0,1], linestyle='--') plt.xlabel('False positive rate') plt.ylabel('True positive rate') plt.title(f'{class_names[i]} ROC Curve') plt.legend() img_location = os.path.join(imageFolderLocation,str(i)+'_roc.png') #15092 plt.savefig(img_location) def startLearnerModule(self,mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,topFeatures,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps,featuresBasedOn, modelFeatureBased,code_configure,modelEvaluationConfig): matrix = '' threshold = -1 pscore = -1 rscore = -1 method = mlconfig['optimizationMethod'] method = method.lower() geneticParam = '' topics = {} optimizationHyperParameter = mlconfig['optimizationHyperParameter'] cvSplit = optimizationHyperParameter['trainTestCVSplit'] nIter = int(optimizationHyperParameter['iterations']) if(method.lower() == 'genetic'): geneticParam = optimizationHyperParameter['geneticparams'] scoreParam = scoreParam if 'thresholdTunning' in mlconfig: thresholdTunning = mlconfig['thresholdTunning'] else: thresholdTunning = 'NA' if len(topFeatures) !=0: self.features=topFeatures else: datacolumns=list(xtrain.columns) if targetColumn in datacolumns: datacolumns.remove(targetColumn) self.features =datacolumns self.log.info(f'-------> Number of Features Used For Training the Model: {len(self.features)}') features_names = str(self.features) if len(features_names) > 500: features_names = ','.join(self.features[:2]) + ', ..... ,' + ','.join(self.features[-2:]) self.log.info(f'-------> Features Used For Training the Model: {features_names}') xtrain = xtrain[self.features] if not xtest.empty: xtest = xtest[self.features] if cvSplit == "": cvSplit =None else: cvSplit =int(cvSplit) if modelType == 'classification': model_type = "Classification" MakeFP0 = False MakeFN0 = False if(len(categoryCountList) == 2): self.log.info("\n -------------- Check for FP or FN -------------- ") self.log.info("-------> Binary Classification") if(thresholdTunning.lower() == 'fp0'): self.log.info("-------> Threshold Tuning: False Positive") MakeFP0 = True elif(thresholdTunning.lower() == 'fn0'): self.log.info("-------> Threshold Tuning: False Negative") MakeFN0 = True if MakeFP0 == False and MakeFN0 == False: self.log.info("-------> Threshold Tuning: Not Any") self.log.info("-------------- Check for FP or FN End-------------- \n") elif(len(categoryCountList) > 2): #bug 12438 self.log.info("\n -------------- Check for FP or FN -------------- ") self.log.info("-------> Multiclass Classification") if(thresholdTunning.lower() == 'fp0' or thresholdTunning.lower() == 'fn0'): self.log.info("-------> Threshold Tuning: Not supported") else: self.log.info("-------> Threshold Tuning: Not Any") self.log.info("-------------- Check for FP or FN End-------------- \n") objClf = ClassifierModel(modelList, modelParams, scoreParam, cvSplit, nIter,geneticParam, xtrain,ytrain,xtest,ytest,method,modelType,MakeFP0,MakeFN0,deployLocation) model, params, score, estimator,model_tried,threshold,pscore,rscore = objClf.classModelling( modelFeatureBased,code_configure) elif modelType == 'regression': model_type = "Regression" objClf = RegressionModel(modelList, modelParams, scoreParam, cvSplit, nIter,geneticParam, xtrain,ytrain,xtest,ytest,method,deployLocation) model,params,score,estimator,model_tried = objClf.regressionModelling(modelFeatureBased,code_configure) elif modelType =='clustering': model_type = 'Clustering' print(modelList) if 'KMeans' in modelList: clustendency 
= self.cluster_tendency(xtrain) model='KMeans' model_tried = '{"Model":"KMeans","Score":"NA"}' kmeanmodelparams=modelParams['KMeans'] n_clusters = kmeanmodelparams['n_clusters'] if n_clusters == None or n_clusters == 0 or n_clusters == '': n_clusters = self.calculateNumberofCluster(xtrain) kmeanmodelparams['n_clusters'] = n_clusters kmeans=KMeans(n_clusters=n_clusters) targetData=kmeans.fit_predict(xtrain) self.log.info('Status:- |... ML Algorithm applied: KMeans') self.log.info('\n------------ Centers Points Start------------') values = kmeans.cluster_centers_.squeeze() #print(values) centers = pd.DataFrame(kmeans.cluster_centers_,columns= xtrain.columns) filename = os.path.join(deployLocation,'centers.csv') centers.to_csv(filename) labels = kmeans.labels_ i=0 for value_row in values: j=0 self.log.info('------->Label: '+str(i)) for value in value_row: self.log.info('---------->Feature: "'+str(self.features[j])+'" Center Point: '+str(value)) j = j+1 i = i+1 self.log.info('------------ Centers Points Start------------\n') score='NA' scoreParam=None params=kmeanmodelparams estimator=kmeans if 'DBSCAN' in modelList: DBSCAN_ModelParams=modelParams['DBSCAN'] db = DBSCAN(eps=DBSCAN_ModelParams['eps'],min_samples = DBSCAN_ModelParams['min_samples']).fit(xtrain) #targetData=db.fit_predict(xtrain) self.log.info('Status:- |... ML Algorithm applied: DBSCAN') labels = db.labels_ n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0) n_noise_ = list(labels).count(-1) self.log.info('------->Labels: '+str(labels)) self.log.info('------->No Of Cluster: '+str(n_clusters_)) self.log.info('------->No Of Noise Point: '+str(n_noise_)) score='NA' scoreParam=None params='' estimator=db model='DBSCAN' model_tried = '{"Model":"DBSCAN","Score":"NA"}' elif modelType == 'topicmodelling': model_type = 'TopicModelling' model='LDA' model_tried = '{"Model":"LDA","Score":"NA"}' LDAmodelparams=modelParams['LDA'] n_topics = LDAmodelparams['n_topics'] n_words_per_topic = LDAmodelparams['n_words_per_topic'] if n_topics == None or n_topics == 0 or n_topics == '': n_topics = 10 LDAmodelparams['n_topics'] = n_topics if n_words_per_topic == None or n_words_per_topic == 0 or n_words_per_topic == '': n_words_per_topic = 10 LDAmodelparams['n_words_per_topic'] = n_words_per_topic lda = LatentDirichletAllocation(n_components=n_topics,random_state=0) self.log.info('Status:- |... ML Algorithm applied: LDA') targetData=lda.fit_transform(xtrain) topics = self.get_topics(lda, topFeatures, n_words_per_topic) self.log.info(topics) score='NA' scoreParam=None params=LDAmodelparams estimator=lda return model_type,model,params, score, estimator,model_tried,xtrain,ytrain,xtest,ytest,threshold,pscore,rscore,method, topics
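
# Sketch of the knee-point rule used by machinelearning.calculateNumberofCluster():
# KMeans is run for k = 1..14 and the chosen k is the point on the inertia
# ("elbow") curve that lies farthest, in perpendicular distance, from the line
# joining the first and last inertia values. The toy blob data below is an
# assumption used purely for illustration.
if __name__ == "__main__":
    import math
    import numpy as np
    from sklearn.cluster import KMeans
    from sklearn.datasets import make_blobs

    X, _ = make_blobs(n_samples=300, centers=4, random_state=0)
    inertias = [KMeans(n_clusters=k, n_init=10).fit(X).inertia_ for k in range(1, 15)]
    x1, y1 = 1, inertias[0]
    x2, y2 = 15, inertias[-1]
    distances = []
    for i, y0 in enumerate(inertias):
        x0 = i + 2
        numerator = abs((y2 - y1) * x0 - (x2 - x1) * y0 + x2 * y1 - y2 * x1)
        denominator = math.sqrt((y2 - y1) ** 2 + (x2 - x1) ** 2)
        distances.append(numerator / denominator)
    n_clusters = distances.index(max(distances)) + 2
    print("suggested n_clusters:", n_clusters - 1)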
anomalyDetectionAE.py
# -*- coding: utf-8 -*-
#Py Libraries import
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from tensorflow.keras.optimizers import Adam,SGD
# from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Dense, Dropout
from sklearn.model_selection import train_test_split
from tensorflow.keras.losses import MeanSquaredLogarithmicError
import os
import json
import keras
from keras.layers import Input, Dense
from keras.callbacks import ModelCheckpoint, EarlyStopping
from sklearn.cluster import DBSCAN
from sklearn.model_selection import RandomizedSearchCV
from sklearn import metrics
import traceback
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
import logging
import joblib
from sklearn import preprocessing
'''
Py Engine: Python >= 3.8

Purpose: find anomalies in the user data using an autoencoder mechanism.
This file contains the base class anomalyDetectionAE, which acts as the entry
point; its mainAnomalyDetectionfn() method decides which algorithm
(autoencoder or DBSCAN) is used, based on the user input.

Two approaches are available for finding anomalies:
    1. LSTM approach (anomalyDetectionAE.aetsmodel_lstm() fn)
    2. Autoencoder approach (AeDetector class)
The autoencoder approach is used by default (the user can select either in the
basic config file).

Limitation: time-series data is handled only by the autoencoder/LSTM
algorithms; the DBSCAN algorithm does not support time series. If a
time-series feature is received for DBSCAN, it should be dropped; the AION
data profiler automatically removes the time feature for DBSCAN, whereas for
the autoencoder the time-series feature is passed through.

Parameter information:
    df: input dataframe from the AION base class.
    paramSpace: default parameters from the basic config (user config settings).
    deployLocation: deployment location. Detailed anomaly information is
        stored in the ../target/output location.
    target: only for supervised problems.
    anomalyMethod: algorithm to be used (autoEncoder or DBScan), received from
        paramSpace.
    testSize: test split for supervised problems; for unsupervised problems the
        whole input data is passed.
    datetimeFeature: datetime feature for the autoencoder.
    mv_featurebased_ad_status: if True, univariate (per-feature) anomaly
        detection is enabled for the autoencoder and anomalies are found for
        each feature selected by the user.
'''
## For the autoencoder (encoder-decoder) base class below, the keras Model
## class acts as the parent class. Config params are received from the AION
## config file (GUI). The class instance uses the keras subclassing API (the
## call() method) to run the encoder and decoder.
class AeDetector(Model): def __init__(self,train,test,units,latent_units,activation): super(AeDetector, self).__init__() #Because anomaly detection ,we r using 'sigmoid' activation for all problems last_layer_activation='sigmoid' self.encoder = tf.keras.Sequential([ Dense(units, activation=activation), Dense((units/2), activation=activation), Dense(latent_units, activation=activation) ]) self.decoder = tf.keras.Sequential([ Dense((units/2), activation=activation), Dense(units, activation=activation), Dense(train.shape[1], activation=last_layer_activation) ]) ## Using keras subclassing api def call(self, x): encoded = self.encoder(x) decoded = self.decoder(encoded) return decoded ##This below function create get_datetime class python file in target->scripts folder '''This aion_gettimegranularity class is used to retrive the time pattern (Granularity) of given datetime feature.''' class aion_gettimegranularity: cls_name="datetimeinformation" def __init__(self,dataframe, datetimefeature): self.df=dataframe self.datetimefeature=datetimefeature # self.log=logging.getLogger('AION') self.log = logging.getLogger('eion') self.log.info("To retrive the granularity of given datetime feature by aion.") def get_dfinfo(self,df): from io import StringIO buf = StringIO() df.info(buf=buf) #self.log.info(buf.getvalue()) return buf.getvalue() ##Main time granularity find function def get_granularity(self): try: ##get local df df_t=self.df buf_info=self.get_dfinfo(df_t) self.log.info(buf_info) df_t.drop(df_t.filter(regex='Unname'),axis=1,inplace=True) try: df_t[self.datetimefeature] = pd.to_datetime(df_t[self.datetimefeature]) except Exception as e: self.log.info("Datetime feature to python datetime format convertion error.\n"+str(e)) df_t['time_diff']=df_t[self.datetimefeature].diff().shift(-1) datetime_mean=df_t['time_diff'].mean() totsec = datetime_mean.total_seconds() ## Dict variable to store datetime details.Initialized all date param as False. status_time={"h":False,"m":False,"s":False, "us":False,"ns":False,"Y":False,"M":False,"D":False} if (datetime_mean.days == 0): if (totsec/3600 > 1): ## hour status_time['h']=True else: if (totsec/60 >1): ## minute status_time['m']=True else: if (totsec <= 1e-06 and totsec > 1e-09): ## microsecond status_time['us']=True elif (totsec<= 1e-09 and totsec >=1e-012): ## nanosecond status_time['ns']=True else: ## second status_time['s']=True else: days=datetime_mean.days if (days/365>1): ## year status_time['Y']=True else: if (days>30): ## month status_time['M']=True else: ## day status_time['D']=True time_pattern=None for k,v in status_time.items(): if (v == True): time_pattern=k self.log.info("<----- DateTime feature pattern (year/month/day/hour/minute/second/millisecond/microsecond/nanosecond) is: \t"+str(time_pattern)) try: try: df_t[self.datetimefeature] = pd.to_datetime(df_t[self.datetimefeature]) except Exception as e: pass df_t['Time_diff'] = ((df_t[self.datetimefeature])).diff(-1).dt.floor('T').dt.total_seconds().div(60).abs() time_threshold=1 df_t['anomalyType'] = (np.where((df_t['Time_diff'] != 1),"Point","Sequence")) df_t.drop("Time_diff",axis=1,inplace=True) except Exception as e: self.log.info("time_diff err message: "+str(e)) except Exception as e: print("time_diff err message: ",str(e)) return df_t ## AION Anomaly detection autoencoder main class. 
It receives input params from anomalyDetector class class anomalyDetectionAE: def __init__(self,df,paramSpace,deployLocation,target,anomalyMethod,testSize,datetimeFeature,mv_featurebased_ad_status): self.mc=None self.es=None #aion gui inputs self.df=df self.paramSpace=paramSpace self.deployLocation=deployLocation self.target=target self.anomalyMethod=anomalyMethod self.testSize=round(testSize,1) self.datetimeFeature=datetimeFeature self.log = logging.getLogger('eion') self.mv_featurebased_ad_status=mv_featurebased_ad_status """ Uncomment below for debug purpose. """ # self.log.info("anomalyDetectionAE constructor: df head: \n"+str(df.head())) # self.log.info("anomalyDetectionAE constructor: df type: \n"+str(type(df))) # self.log.info("anomalyDetectionAE constructor: df len: \n"+str(len(df))) # self.log.info("anomalyDetectionAE constructor: self.datetimeFeature: \n"+str((self.datetimeFeature))) def targetnumClass(self,data): count_classes = pd.value_counts(data, sort = True) num_of_classes= len(count_classes) return num_of_classes def configload(self): cwd = os.path.abspath(os.path.dirname(__file__)) file_name='config.json' try: config_file=os.path.normpath(os.path.join(cwd,'config',file_name)) except Exception as e: self.log.info("<---- config path error. Error Msg: ---->"+str(e)) with open(config_file, 'r') as file: data = json.load(file) datapath=str(data["data"]) target=str(data["target"]) anomaly_algorithm=str(data["anomalydetection_algorithm"]) ae_hyperparameter=data["autoEncoder"] dbscan_hyperparameter=data["DBScan"] return datapath,target,ae_hyperparameter,anomaly_algorithm,dbscan_hyperparameter ## model summary def summary(self,model): return model.summary() ##To load ae model def model_load(self, path): cwd = os.path.abspath(os.path.dirname(__file__)) file_name=path try: model_location=os.path.normpath(os.path.join(cwd,'model',file_name)) except Exception as e: self.log.info("<---- Model path error. Error Msg: ---->"+str(e)) loaded_model = joblib.load(model_location) return loaded_model ## Load dataset def dataload(self,datapath): cwd = os.path.abspath(os.path.dirname(__file__)) file_name=datapath try: data_file=os.path.normpath(os.path.join(cwd,'data',file_name)) except Exception as e: self.log.info("<---- data path error. Error Msg:: ---->"+str(e)) df = pd.read_csv(data_file) return df ## Create dataframe with time sequence data, if not time series, sequence length always 1. 
def create_dataset(self,X, y, time_steps=1): Xs, ys = [], [] for i in range(len(X) - time_steps): v = X.iloc[i:(i + time_steps)].values Xs.append(v) ys.append(y.iloc[i + time_steps]) return np.array(Xs), np.array(ys) ## model for time series based AE encoder, decoder fn def aetsmodel_lstm(self,n_dims, n_timesteps, n_bottleneck,units,activation,df): # inputs = Input(shape = (n_timesteps, n_dims)) inputs = Input(shape = (df.shape[1], df.shape[2])) e = keras.layers.LSTM(units, activation = activation, return_sequences = True)(inputs) ## code layer or compressed form of data produced by the autoencoder, bottleneck layer latent_space = keras.layers.LSTM(n_bottleneck, activation = activation, return_sequences = False, name = 'bottleneck_layer')(e) e = keras.layers.RepeatVector(n_timesteps)(latent_space) decoder = keras.layers.LSTM(n_bottleneck, activation = activation, return_sequences = True)(e) decoder = keras.layers.LSTM(units, activation = activation, return_sequences = True)(decoder) outputs = keras.layers.TimeDistributed(Dense(n_dims))(decoder) model = Model(inputs = inputs, outputs = outputs) return model ## adding some model checkpoints to ensure the best values will be saved and early stopping to prevent the model from running unnecessary. def callbacks(self, **kwargs): self.mc = ModelCheckpoint(filepath = kwargs.get("filename"), save_best_only = True, verbose = 0) self.es = EarlyStopping(monitor = kwargs.get("monitor"), patience = kwargs.get("patience")) return self.es,self.mc ##This below function create get_datetime class python file in target->scripts folder '''This aion_gettimegranularity class is used to retrive the time pattern (for getting time granularity) of given datetime feature.''' def create_datetime_pyfile(self): try: datetimepattern_code=r"""## import pandas as pd import numpy as np class aion_gettimegranularity: cls_name="datetimeinformation" def __init__(self,dataframe, datetimefeature): self.df=dataframe self.datetimefeature=datetimefeature def get_dfinfo(self,df): from io import StringIO buf = StringIO() df.info(buf=buf) #print(buf.getvalue()) return buf.getvalue() def get_granularity(self): try: ##get local df df_t=self.df buf_info=self.get_dfinfo(df_t) df_t.drop(df_t.filter(regex='Unname'),axis=1,inplace=True) try: df_t[self.datetimefeature] = pd.to_datetime(df_t[self.datetimefeature]) except Exception as e: pass # print("Datetime feature to python datetime format convertion error.\n",e) df_t['time_diff']=df_t[self.datetimefeature].diff().shift(-1) datetime_mean=df_t['time_diff'].mean() totsec = datetime_mean.total_seconds() ## Dict variable to store datetime details.Initialized all date param as False. 
status_time={"h":False,"m":False,"s":False,"us":False,"ns":False,"Y":False,"M":False,"D":False} if (datetime_mean.days == 0): if (totsec/3600 > 1): ## hour status_time['h']=True else: if (totsec/60 >1): ## minute status_time['m']=True else: if (totsec <= 1e-06 and totsec > 1e-09): ## microsecond status_time['us']=True elif (totsec<= 1e-09 and totsec >=1e-012): ## nanosecond status_time['ns']=True else: ## second status_time['s']=True else: days=datetime_mean.days if (days/365>1): ## year status_time['Y']=True else: if (days>30): ## month status_time['M']=True else: ## day status_time['D']=True time_pattern=None for k,v in status_time.items(): if (v == True): time_pattern=k #print("<----- DateTime feature pattern (year/month/day/hour/minute/second/millisecond/microsecond/nanosecond) is: \t",(time_pattern)) try: try: df_t[self.datetimefeature] = pd.to_datetime(df_t[self.datetimefeature]) except Exception as e: pass df_t['Time_diff'] = ((df_t[self.datetimefeature])).diff(-1).dt.floor('T').dt.total_seconds().div(60).abs() time_threshold=1 df_t['anomalyType'] = np.where((df_t['Time_diff'] != 1),"Point","Sequence") df_t.drop("Time_diff",axis=1,inplace=True) except Exception as e: print("time_diff err message: ",str(e)) except Exception as e: pass # print("get_granularity err msg: ",(e)) return df_t """ cwd=self.deployLocation file_name='aion_granularity'+'.py' try: data_file=os.path.normpath(os.path.join(cwd,'script',file_name)) with open(data_file,'w') as file: file.write(datetimepattern_code) except Exception as error: self.log.info("<---- datetimepattern_code write Error.: ---->"+str(error)) self.log.info("datetimepattern source code created at target folder...\n") except Exception as error: self.log.info("<---- datetimepattern_code function Error.: ---->"+str(error)) ## Simple mlp based autoencoder model, not used now. # def aetsmodel_lstm(self,X_train): # model = keras.Sequential() # # autoencoder encoder # model.add(keras.layers.LSTM( # units=64, # input_shape=(X_train.shape[1], X_train.shape[2]) # )) # model.add(keras.layers.Dropout(rate=0.2)) # model.add(keras.layers.RepeatVector(n=X_train.shape[1])) # # autoencoder decoder # model.add(keras.layers.LSTM(units=64, return_sequences=True)) # model.add(keras.layers.Dropout(rate=0.2)) # model.add( # keras.layers.TimeDistributed( # keras.layers.Dense(units=X_train.shape[2]) # ) # ) # return model ## To find optimal anomaly threshold value def find_threshold(self,model, x_train_scaled): reconstructions = model.predict(x_train_scaled) # provides losses of individual instances msle reconstruction_errors = tf.keras.losses.mae(reconstructions, x_train_scaled) # threshold for anomaly scores threshold = np.mean(reconstruction_errors.numpy())+ 2*np.std(reconstruction_errors.numpy()) return threshold ## compiling the model with adam optimizer and mean squared error loss def model_compile(self, model,lr, loss, opt): if opt == "adam": opt = Adam(learning_rate = lr) else: opt = SGD(learning_rate = lr) model.compile(loss = loss, optimizer = opt) ## save anomaly points in aion target folder def save_anomalyvalues(self,df,file_name): # cwd = os.path.abspath(os.path.dirname(__file__)) cwd=self.deployLocation file_name=file_name+'.csv' try: out_path=os.path.normpath(os.path.join(cwd,'output')) if not os.path.isdir(out_path): os.makedirs(out_path) data_file=os.path.normpath(os.path.join(cwd,'output',file_name)) except Exception as error: self.log.info("<---- autoencoder artifact_dir path. 
Error Msg: ---->"+str(error)) try: df.to_csv(data_file,index=False) except Exception as e: self.log.info("<---- Saving log data frame error. Error Msg: ---->"+str(e)) ## model summary def summary(self,model): return model.summary() ##Method to find subsequence and point anomalies aion_gettimegranularity def find_point_subsequence_anomalies(self,datetime_column,dataframe=None): try: dataframe.reset_index(level=0, inplace=True) try: dataframe[datetime_column] = pd.to_datetime(dataframe[datetime_column]) except Exception as e: self.log.info("Dataframe contains no datetime feature.Err.Msg: \n"+str(e)) pass try: ##Below commented part using normalize with time delta, find point anomalies.But not used,just for reference. ##get day to check difference #date_f = dataframe[datetime_column].dt.normalize() ##compare successive rows and identify group size #dataframe['anomaly_value'] = np.where(dataframe[datetime_column].groupby(date_f.ne(date_f.shift()).cumsum()).transform('size').gt(1),'subsequence_anomaly', 'Point_anomaly') ##Using get_timepattern method aion_gettimegranularity_obj=aion_gettimegranularity(dataframe,datetime_column) anomaly_info_df=aion_gettimegranularity_obj.get_granularity() except Exception as e: self.log.info("find_point_subsequence_anomalies,: aion_gettimegranularity err msg:: \n"+str(e)) self.log.info("find_point_subsequence_anomalies,: anomaly_info_df: \n"+str(anomaly_info_df)) except Exception as e: self.log.info("find_point_subsequence_anomalies,: err msg:: \n"+str(e)) return anomaly_info_df ## Auto encoder time series function call ## dataframe info() not working for py logging, so workaround we can get information in buffer and log it. def get_df_info(self,df): from io import StringIO buf = StringIO() df.info(buf=buf) #self.log.info(buf.getvalue()) return buf.getvalue() ## Method to detect time series based anomalies in user data. Using both lstm and dense based autoencoder approaches. def aionAEAnomalyTS(self,df,test_size_perc,target,time_steps,dropout,mv_unique_feature_ad): ae_hyperparameter=self.paramSpace anomaly_algorithm=self.anomalyMethod # test_size=float(self.testSize) test_size=0.0 # train_size=1-test_size train_size=1-test_size # train_size_perc=train_size*100 train_size=int(len(df) * train_size) try: timeseries_layers=ae_hyperparameter['timeseries_layers'] ## Here we are checking whether to use only LSTM layers for dnn or dense layers. Dense layers better for predicting point as well sequence anomalies in time series. if (timeseries_layers.lower() == 'lstm'): try: ## Need to get normalized data for threshold calculation. 
data_mean=df.mean(axis=0) data_std=df.std(axis=0) data=(df-data_mean)/data_std # train, test = df[:train_size], df[train_size:] train, test = data[:train_size], data[train_size:] test=train test1=test ## Need to copy test data train_index=train.index test_index=test.index cols = df.columns # train, test = train_test_split(df, test_size=test_size,random_state=42) X_train, y_train = self.create_dataset( train, train, time_steps ) X_test, y_test = self.create_dataset( test, test, time_steps ) n_dims=X_train.shape[2] n_timesteps=X_train.shape[1] opt=ae_hyperparameter['optimizer'] loss_fn=ae_hyperparameter["loss"] epochs=int(ae_hyperparameter['epochs']) batch_size=int(ae_hyperparameter['batch_size']) learning_rate=float(ae_hyperparameter['learning_rate']) n_bottleneck=int(ae_hyperparameter['latentspace_size']) units=int(ae_hyperparameter['hidden_units']) activation=ae_hyperparameter['activation'] ##For task 20731 minimum_threshold_user = str(ae_hyperparameter['min_threshold']) maximum_threshold_user = str(ae_hyperparameter['max_threshold']) autoencoder=self.aetsmodel_lstm(n_dims, n_timesteps, n_bottleneck,units,activation,X_train) ##To save file # cwd = os.path.abspath(os.path.dirname(__file__)) cwd=self.deployLocation try: artifact_dir=os.path.normpath(os.path.join(cwd,'output','autoenc_artifact_dir')) if not os.path.isdir(artifact_dir): os.makedirs(artifact_dir) except Exception as e: self.log.info("<---- Autoencoder artifact_dir path error. Error Msg: ---->"+str(e)) #dl callback fn to get best loss fn, early stopping & model checkpoint call backs es,mc=self.callbacks(filename = artifact_dir, patience = 5, monitor = "val_loss") self.model_compile(autoencoder,learning_rate, loss_fn, opt) X_train = np.reshape(X_train,(X_train.shape[0],X_train.shape[1],X_train.shape[2])) X_test = X_test.reshape((X_test.shape[0], X_test.shape[1],n_dims)) # y_test = y_test.reshape((y_test.shape[0], y_test.shape[1], n_dims)) model_hist = autoencoder.fit( X_train, X_train, epochs=epochs, batch_size=batch_size, validation_split=0.1, shuffle=False,callbacks = [mc, es] ) model_info=self.summary(autoencoder) X_train_pred = autoencoder.predict(X_train) train_mae_loss = np.mean(np.abs(X_train_pred - X_train), axis=1) ## Task 20731 if ((minimum_threshold_user and minimum_threshold_user.strip()) and (maximum_threshold_user and maximum_threshold_user.strip())): threshold = float(maximum_threshold_user) min_threshold = float(minimum_threshold_user) elif ((minimum_threshold_user.strip()) and (not maximum_threshold_user.strip())): threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) min_threshold = float(minimum_threshold_user) elif ((not minimum_threshold_user.strip()) and (maximum_threshold_user.strip())): threshold = float(maximum_threshold_user) min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) else: threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) # threshold = np.mean(train_mae_loss) + np.std(train_mae_loss) self.log.info("Anomaly threshold max value based on loss fn (MAE): "+str(threshold)) self.log.info("Anomaly threshold min value based on loss fn (MAE): "+str(min_threshold)) X_test_pred = autoencoder.predict(X_test) test_mae_loss = np.mean(np.abs(X_test_pred - X_test), axis=1) test_score_df = pd.DataFrame(index=test_index[time_steps:]) if (n_dims >1): columns = [f'loss_{num}' for num in range(n_dims)] # test_score_df = pd.DataFrame(test_mae_loss, columns=columns, index=test_index[time_steps:]) 
test_score_df['loss'] = test_mae_loss.mean(axis=1) else: test_score_df['loss'] = test_mae_loss test_score_df['max_threshold'] = threshold test_score_df['min_threshold'] = min_threshold test_score_df['anomaly_value'] = (test_score_df.loss > test_score_df.max_threshold) test_score_df['anomaly_value'] = (test_score_df.loss < test_score_df.min_threshold) ## Newly added for lstm issue ## if coming dataframe have datetime index , copy it before concat (different indexed dfs) import pandas.api.types as ptypes # if (isinstance(test_score_df, pd.DatetimeIndex) and isinstance(df, pd.DatetimeIndex)): test_cp_index=None if (ptypes.is_datetime64_dtype(test_score_df.index) and ptypes.is_datetime64_dtype(df.index)): # self.log.info("test_score_df and df have datetime index cols") test_cp_index=test_score_df.index df_cp_index=df.index test_score_df=test_score_df.reset_index() df=df.reset_index() ##self.datetimeFeature test_score_df.dropna() try: test_score_df[self.datetimeFeature]=pd.to_datetime(test_score_df[self.datetimeFeature]) df[self.datetimeFeature]=pd.to_datetime(df[self.datetimeFeature]) except: pass try: final_df=pd.DataFrame() cols_to_use = df.columns.difference(test_score_df.columns) final_df = pd.merge(test_score_df, df[cols_to_use], left_index=True, right_index=True, how='inner') except Exception as e: self.log.info("final_df creation err msg: \n: "+str(e)) else: test_index=test_score_df.reset_index(drop=True) test_cp_index=test_index.index df_index=df.reset_index(drop=True) final_df=pd.DataFrame() final_df = test_score_df.join(df) final_df.dropna() ##Again set datetime index to dataframes,drop datetime feature column and set it as index. try: final_df.set_index(self.datetimeFeature,inplace=True) df.set_index(self.datetimeFeature,inplace=True) df.drop(self.datetimeFeature,axis=1,inplace=True) final_df.drop(self.datetimeFeature,axis=1,inplace=True) except: pass ## Below commented code used to print df.info() in log file (using get_df_info() methos). # self.log.info("anomaly final_df info: \n") # buf_info=self.get_df_info(final_df) # self.log.info(buf_info) # final_df=pd.DataFrame() ##Getback the datetime index back final_df.index=test_cp_index normal_prediction_df=test_score_df.loc[test_score_df['anomaly_value']==False] anomaly_prediction_df=test_score_df.loc[test_score_df['anomaly_value']==True] ## Newly added for lstm issue anomaly_prediction_df=pd.merge(anomaly_prediction_df, final_df, on=['loss', 'max_threshold','min_threshold', 'anomaly_value'], how="left") # anomaly_prediction_df.fillna(anomaly_prediction_df.mean(), inplace=True) anomaly_prediction_df['anomaly_value'] = anomaly_prediction_df['anomaly_value'].replace([np.inf, -np.inf], np.nan) # anomaly_prediction_df['anomaly_value'] = anomaly_prediction_df['anomaly_value'].replace([np.inf, -np.inf], np.nan) final_df['anomaly_value'] = final_df['anomaly_value'].replace([np.inf, -np.inf], np.nan) anomaly_prediction_df['anomaly_value'] = anomaly_prediction_df['anomaly_value'].replace({True: 1, False: 0}) final_df['anomaly_value'] = final_df['anomaly_value'].replace({True:1, False: 0}) #make sure no nan values after dataframe operations anomaly_prediction_df.dropna() final_df.dropna() # anomal_loss_threshold=anomaly_prediction_df #use if we want to save loss and threshold as dataframe info. 
self.log.info("Anomaly data with loss and threshold informations: \n"+str(anomaly_prediction_df)) """ Saving anomaly plots in target->output->anomaly_plot folder """ ## Goto if cond for multivariate whole dataset anomaly prediction, else goto else part for feature based ad prediction. if (mv_unique_feature_ad.lower()=='false'): for col in df.columns: df_subset = anomaly_prediction_df[col] fig, ax = plt.subplots() df[col].plot(legend=False, ax=ax) df_subset.plot(legend=False, ax=ax, color="r") plot_name=col ax.set_title(plot_name+"_Anomaly Data Plot") ax.set_xlabel("DateTime") ax.set_ylabel("Values") plot_name=plot_name+'_'+'anomalyplot.png' try: plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot')) if not os.path.isdir(plot_dir): os.makedirs(plot_dir) plotpath=str(plot_dir)+'/'+plot_name except Exception as e: self.log.info("<---- plot_dir path error. Error Msg: ---->"+str(e)) if os.path.exists(plotpath): os.remove(plotpath) plt.savefig(plotpath) # plt.savefig(str(plot_dir)+'/'+plot_name) plt.clf() plt.cla() plt.close() else: df_subset = anomaly_prediction_df fig, ax = plt.subplots() df.plot(legend=False, ax=ax) ax.set_title("Anomaly Data Plot") ax.set_xlabel("X values") ax.set_ylabel("Y Values") df_subset.plot(legend=False, ax=ax, color="r") plot_name=df.columns[0] ax.set_title(plot_name+"_Anomaly Data Plot") # ax.set_xlabel("DateTime") # ax.set_ylabel("Values") # plot_name=df.columns[0] plot_name=plot_name+'_'+'anomalyplot.png' try: plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot')) if not os.path.isdir(plot_dir): os.makedirs(plot_dir) plotpath=str(plot_dir)+'/'+plot_name except Exception as e: self.log.info("<---- plot_dir path error. Error Msg: ---->"+str(e)) if os.path.exists(plotpath): os.remove(plotpath) plt.savefig(plotpath) # plt.savefig(str(plot_dir)+'/'+plot_name) plt.clf() plt.cla() plt.close() #process dt feature and save anomalies. datetime_column=str(self.datetimeFeature) try: anomaly_prediction_df=self.find_point_subsequence_anomalies(datetime_column,anomaly_prediction_df) # normal_prediction_df=self.find_point_subsequence_anomalies(datetime_column,normal_prediction_df) except: ##If any issue in time series point anomaly detection, skip it. self.log.info("Detecting point anomalies have some issue,check datetime feature.") pass combined_df=pd.concat([anomaly_prediction_df,normal_prediction_df],ignore_index=True) combined_df['anomaly_value']=combined_df['anomaly_value'].fillna('Normal_Data') ## If categorical features in original df, then inverse transform the values. anomaly_prediction_df['anomaly_value'] = anomaly_prediction_df['anomaly_value'].replace({1: "Anomaly", 0: "Normal"}) final_df['anomaly_value'] = final_df['anomaly_value'].replace({1: "Anomaly", 0: "Normal"}) ##Now we are storing anomaly log (as dataframe) based on two options: 1. Anomalies based on all features, 2. Anomalies based on each individual feature. if (mv_unique_feature_ad.lower()=='true'): ## Multivariate and saving individual feature based anomalies self.save_anomalyvalues(anomaly_prediction_df,(str(feature_name)+'_ts_anomaly_dataframe')) # self.save_anomalyvalues(combined_df,(str(feature_name)+'_ts_overall_dataframe')) try: final_df=self.merge_pre_post_dfs(final_df) except Exception as e: self.log.info("Anomaly Detection Merge df exception:\n"+str(e)) #If merge fails, just out! 
pass self.save_anomalyvalues(final_df,(str(feature_name)+'_ts_overall_dataframe')) ## If we want anomaly dataframe with loss and threshold for each values (rows),please uncomment below line # self.save_anomalyvalues(anomal_loss_threshold,'ts_anomaly_dataframe_lt')) ## Save actual test data test_score_df #self.save_anomalyvalues(test_score_df,(str(feature_name)+'_testdata')) else: self.save_anomalyvalues(anomaly_prediction_df,'ts_anomaly_dataframe') # self.save_anomalyvalues(combined_df,'ts_normal_anomaly_dataframe') try: final_df=self.merge_pre_post_dfs(final_df) except Exception as e: self.log.info("Anomaly Detection Merge df exception:\n"+str(e)) #If merge fails, just out! pass self.save_anomalyvalues(final_df,'ts_overall_dataframe') ## If we want anomaly dataframe with loss and threshold for each values (rows),please uncomment below line # self.save_anomalyvalues(anomal_loss_threshold,'ts_anomaly_dataframe_lt')) ## Save actual test data test_score_df #self.save_anomalyvalues(test_score_df,'testdata') anomaly_info_df=final_df self.log.info("<---- Autoencoder time series data anomalies: ---->"+str(anomaly_prediction_df)) self.log.info("<---- Autoencoder time series:Number of anomalies in data:: ---->"+str(len(anomaly_prediction_df))) # return model except Exception as e: self.log.info("AD lstm traceback error: \n"+str(traceback.format_exc())) ## Dense layer based time series AD, most real world usecases, it is working best compared to lstm based.. elif (timeseries_layers.lower() == 'dense'): try: feature_name=df.columns feature_name = ' '.join(map(str, feature_name)) try: #Passing whole data,so test size set as zero. test_size=0.0 # train_size=1-test_size train_size=1-test_size # train_size_perc=train_size*100 train_size=int(len(df) * train_size) train_data,test_data = df[:train_size], df[train_size:] test_data=train_data except: #If any error comes,us sklearn train test split train_data,test_data = train_test_split(df,test_size=test_size,random_state=42) pass test_index=test_data.index ## to get datetime index units=int(ae_hyperparameter['hidden_units']) latent_units=int(ae_hyperparameter['latentspace_size']) activation=ae_hyperparameter['activation'] ##For task 20731 minimum_threshold_user = str(ae_hyperparameter['min_threshold']) maximum_threshold_user = str(ae_hyperparameter['max_threshold']) train_data=train_data.values test_data=test_data.values ## tss is time series flag, true or false autoencoder = AeDetector(train_data,test_data,units,latent_units,activation) opt=ae_hyperparameter['optimizer'] loss_fn=ae_hyperparameter["loss"] epochs=int(ae_hyperparameter['epochs']) batch_size=int(ae_hyperparameter['batch_size']) learning_rate=float(ae_hyperparameter['learning_rate']) cwd=self.deployLocation try: artifact_dir=os.path.normpath(os.path.join(cwd,'output','autoenc_artifact_dir')) if not os.path.isdir(artifact_dir): os.makedirs(artifact_dir) except Exception as e: self.log.info("<---- artifact_dir path error. 
Error Msg: ---->"+str(e)) es,mc=self.callbacks(filename = artifact_dir, patience = 5, monitor = "val_loss") self.model_compile(autoencoder,learning_rate, loss_fn, opt) # autoencoder.compile(optimizer='adam', loss='mae') autoencoder.fit(train_data, train_data, epochs = epochs, batch_size=batch_size, validation_data=(test_data, test_data),callbacks = [mc, es]) # reconstructed = autoencoder(train_data) reconstructed = autoencoder.predict(train_data) train_mae_loss = tf.keras.losses.mae(reconstructed, train_data) ## Task 20731 if ((minimum_threshold_user and minimum_threshold_user.strip()) and (maximum_threshold_user and maximum_threshold_user.strip())): threshold = float(maximum_threshold_user) min_threshold = float(minimum_threshold_user) elif ((minimum_threshold_user.strip()) and (not maximum_threshold_user.strip())): threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) min_threshold = float(minimum_threshold_user) elif ((not minimum_threshold_user.strip()) and (maximum_threshold_user.strip())): threshold = float(maximum_threshold_user) min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) else: threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) # threshold = np.mean(train_mae_loss) + np.std(train_mae_loss) self.log.info("Anomaly threshold max value based on loss fn (MAE): "+str(threshold)) self.log.info("Anomaly threshold min value based on loss fn (MAE): "+str(min_threshold)) test_labels=None if (len(self.datetimeFeature) >= 1): time_series_data="True" else: time_series_data="False" pred,test_score_df,actual_data,anomaly_info_df = self.prediction(autoencoder, test_data,min_threshold, threshold,test_labels,time_series_data,time_steps,test_index) # normal_prediction_df=(anomaly_info_df[~anomaly_info_df['anomaly_value']]) normal_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==False] anomaly_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==True] #Below ts_dataframe_anomaly not for production, just for testing purpose. If uncommented, comment it. #self.save_anomalyvalues(anomaly_info_df,'ts_dataframe_normal') # anomal_loss_threshold=anomaly_prediction_df #use if we want to save loss and threshold as dataframe info. self.log.info("Anomaly data with loss and threshold informations: \n"+str(anomaly_prediction_df)) # anomaly_prediction_df_plot=anomaly_prediction_df """ Saving anomaly plots in target->output->anomaly_plot folder """ ## Only for multivariate (all features) based anomaly data plot ## Use of the below part if anomaly df columns came as numerical columns. # if not (df.columns.equals(anomaly_prediction_df.columns)): # num_cols = [] # try: # num_cols=[num_cols.append(float(col)) for col in anomaly_prediction_df.columns.values] # except ValueError: # pass # #Dense layer scaler conversion makes column names as int values, so here find the int cols and rename to original names. # if (num_cols): # anomaly_prediction_df=anomaly_prediction_df[num_cols] # anomaly_prediction_df.columns=df.columns # normal_prediction_df=normal_prediction_df[num_cols] # normal_prediction_df.columns=df.columns ## Goto if cond for multivariate whole dataset anomaly prediction, else goto else part for feature based ad prediction. 
if (mv_unique_feature_ad.lower()=='false'): # for col in df.columns: for col in actual_data.columns: df_subset = anomaly_prediction_df[col] fig, ax = plt.subplots() df[col].plot(legend=False, ax=ax) df_subset.plot(legend=False, ax=ax, color="r") plot_name=col ax.set_title(plot_name+"_Anomaly Data Plot") ax.set_xlabel("DateTime") ax.set_ylabel("Values") plot_name=plot_name+'_'+'anomalyplot.png' try: plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot')) if not os.path.isdir(plot_dir): os.makedirs(plot_dir) plotpath=str(plot_dir)+'/'+plot_name except Exception as e: self.log.info("<---- plot_dir path error. Error Msg: ---->"+str(e)) if os.path.exists(plotpath): os.remove(plotpath) plt.savefig(plotpath) # plt.savefig(str(plot_dir)+'/'+plot_name) plt.clf() plt.cla() plt.close() else: df_subset = anomaly_prediction_df fig, ax = plt.subplots() df.plot(legend=False, ax=ax) ax.set_title("Anomaly Data Plot") ax.set_xlabel("DateTime") ax.set_ylabel("Values") df_subset.plot(legend=False, ax=ax, color="r") plot_name=df.columns[0] ax.set_title(plot_name+"_Anomaly Data Plot") # ax.set_xlabel("DateTime") # ax.set_ylabel("Values") # plot_name=df.columns[0] plot_name=plot_name+'_'+'anomalyplot.png' try: plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot')) if not os.path.isdir(plot_dir): os.makedirs(plot_dir) plotpath=str(plot_dir)+'/'+plot_name except Exception as e: self.log.info("<---- plot_dir path error. Error Msg: ---->"+str(e)) if os.path.exists(plotpath): os.remove(plotpath) plt.savefig(plotpath) # plt.savefig(str(plot_dir)+'/'+plot_name) plt.clf() plt.cla() plt.close() datetime_column=str(self.datetimeFeature) # anomaly_prediction_df=self.find_point_subsequence_anomalies(datetime_column,anomaly_prediction_df) # normal_prediction_df=self.find_point_subsequence_anomalies(datetime_column,normal_prediction_df) try: anomaly_prediction_df=self.find_point_subsequence_anomalies(datetime_column,anomaly_prediction_df) # normal_prediction_df=self.find_point_subsequence_anomalies(datetime_column,normal_prediction_df) except: self.log.info("Detecting point anomalies have some issue,check datetime feature.") ##Just pass if datetime column provides issue, use without datetime column info pass combined_df=pd.concat([anomaly_prediction_df,normal_prediction_df],ignore_index=True) combined_df['anomaly_value']=combined_df['anomaly_value'].fillna('Normal_Data') ## If categorical features in original df, then inverse transform the values. try: # anomaly_info_df['anomaly_value']=anomaly_info_df['anomaly_value'].astype(str).replace(replace_values_F,'NormalDataPoint', regex=True) self.naming_anomalyvalues(anomaly_info_df) except Exception as e: self.log.info("anomaly_info_df exception err msg: \n"+str(e)) ##Now we are storing anomaly log (as dataframe) based on two options: 1. Anomalies based on all features, 2. Anomalies based on each individual feature. if (mv_unique_feature_ad.lower()=='true'): ## Multivariate and saving individual feature based anomalies self.save_anomalyvalues(anomaly_prediction_df,(str(feature_name)+'_ts_anomaly_dataframe')) try: anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df) except Exception as e: #If merge fails, just out!. self.log.info("Anomaly Detection Merge df exception :\n"+str(e)) finally: #check merging done or not, to be imp. 
pass self.save_anomalyvalues(anomaly_info_df,(str(feature_name)+'_ts_overall_dataframe')) '''For overall ordered output,uncomment the below.''' # self.save_anomalyvalues(combined_df,(str(feature_name)+'_ts_overall_dataframe_ordered')) ## If we want anomaly dataframe with loss and threshold for each values (rows),please uncomment below line # self.save_anomalyvalues(anomal_loss_threshold,'ts_anomaly_dataframe_lt') ## Save actual test data actual_data #self.save_anomalyvalues(actual_data,(str(feature_name)+'_testdata')) else: self.save_anomalyvalues(anomaly_prediction_df,'ts_anomaly_dataframe') try: anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df) except Exception as e: #If merge fails, just out!. self.log.info("Anomaly Detection Merge df exception :\n"+str(e)) finally: #check merging done or not, to be imp. pass self.save_anomalyvalues(anomaly_info_df,'ts_overall_dataframe') #Ordered data # self.save_anomalyvalues(combined_df,'ts_overall_dataframe_ordered') ## If we want anomaly dataframe with loss and threshold for each values (rows),please uncomment below line # self.save_anomalyvalues(anomal_loss_threshold,'ts_anomaly_dataframe_lt'), ## Save actual test data test_score_df #self.save_anomalyvalues(actual_data,'testdata') self.log.info("<---- Autoencoder time series anomalies : ---->"+str(anomaly_prediction_df)) self.log.info("<---- Autoencoder time series, Number of anomalies in data: ---->"+str(len(anomaly_prediction_df))) # self.save_anomalyvalues(anomaly_prediction_df,'ts_anomaly_dataframe') except Exception as e: self.log.info("dense layer anomaly error: \n"+str(traceback.format_exc())) else: self.log.info("Only LSTM and Dense layers supported for time series.") except Exception as e: self.log.info("<---- time series error msg: ---->"+str(e)) self.log.info("<---- time series error msg (detailed): ---->"+str(traceback.format_exc())) return autoencoder,anomaly_prediction_df,anomaly_info_df ## To normalize data,use when necessary def normalize_data(train_data,test_data): min_val=tf.reduce_min(train_data) max_val=tf.reduce_max(train_data) train_data = (train_data - min_val)/(max_val - min_val) test_data = (test_data - min_val)/(max_val - min_val) #converte the data into float train_data = tf.cast(train_data, dtype=tf.float32) test_data = tf.cast(test_data, dtype=tf.float32) return train_data,test_data ## Scaling data ,Not used because of our aion preprocessing data profiler option. use when necessary. 
def getScaledData(method='standard', train_df=None, test_df=None, feature_col='feature'): from sklearn.preprocessing import StandardScaler if method == 'standard': scaler = StandardScaler() else: scaler = MinMaxScaler() scaler = scaler.fit(train_df[[feature_col]]) train_df['scaled_'+feature_col] = scaler.transform(train_df[[feature_col]]) test_df['scaled_'+feature_col] = scaler.transform(test_df[[feature_col]]) return train_df, test_df, scaler ## prediction fn def prediction(self,model, data,min_threshold, threshold,test_labels,time_series_status,time_steps,test_index): # data1=scaler.inverse_transform(data) try: df_new=self.df.drop(self.datetimeFeature,axis=1,inplace=False) except: df_new=self.df try: actual_data=pd.DataFrame(self.df,columns=df_new.columns) except Exception as e: actual_data=pd.DataFrame(self.df) pass n_features=data.shape[1] self.log.info("prediction: number of features: \n"+str(n_features)) predicted_data = model.predict(data) loss = tf.keras.losses.mae(predicted_data, data) if (time_series_status.lower() == 'true'): test_score_df = pd.DataFrame(index=test_index) actual_data = actual_data.set_index(test_index) anomaly_info_df=pd.DataFrame() test_score_df['loss'] = loss test_score_df['max_threshold'] = threshold test_score_df['min_threshold'] = min_threshold ## Task 20731 #test_score_df['anomaly_value'] = test_score_df.apply(lambda x: x.loss > x.max_threshold or x.loss <= x.min_threshold, axis=1) test_score_df['anomaly_value'] = np.where((test_score_df["loss"] > test_score_df["max_threshold"]) | (test_score_df["loss"] <= test_score_df["min_threshold"]), True, False) anomaly_info_df = pd.concat([actual_data, test_score_df], axis=1) else: test_score_df = pd.DataFrame() anomaly_info_df=pd.DataFrame() test_score_df['loss'] = loss #test_score_df['threshold'] = threshold test_score_df['max_threshold'] = threshold test_score_df['min_threshold'] = min_threshold ## Task 20731 #test_score_df['anomaly_value'] = (test_score_df.loss >= test_score_df.max_threshold) #test_score_df['anomaly_value'] = (test_score_df.loss < test_score_df.min_threshold) test_score_df['anomaly_value'] = np.where((test_score_df["loss"] > test_score_df["max_threshold"]) | (test_score_df["loss"] <= test_score_df["min_threshold"]), True, False) anomaly_info_df = pd.concat([actual_data, test_score_df], axis=1) return tf.math.less(loss, threshold),test_score_df,actual_data,anomaly_info_df ##Not used now, for data ploting purpose # def plot(self,autoencoder,data, n): # enc_img = autoencoder.encoder(data) # dec_img = autoencoder.decoder(enc_img) # plt.plot(data[n], 'b') # plt.plot(dec_img[n], 'r') # plt.fill_between(np.arange(data.shape[1]), data[n], dec_img[n], color = 'lightcoral') # plt.legend(labels=['Input', 'Reconstruction', 'Error']) # plt.show() ## autoencoder fn for non timeseries data def ae_nontimeseriesmodelfn(self,df,target): autoencoder=None mv_unique_feature_ad=self.mv_featurebased_ad_status #For supervised non time series problems, we need to remove datetime feature. This will help scaler algs process the numeric data only. 
try: if (target == ''): try: test_size=0.0 # train_size=1-test_size train_size=1-test_size # train_size_perc=train_size*100 train_size=int(len(df) * train_size) train_data,test_data = df[:train_size], df[train_size:] test_data=train_data except: test_size=float(self.testSize) train_data,test_data = train_test_split(df,test_size=test_size,random_state=42) pass ae_hyperparameter=self.paramSpace units=int(ae_hyperparameter['hidden_units']) latent_units=int(ae_hyperparameter['latentspace_size']) activation=ae_hyperparameter['activation'] ##For task 20731 minimum_threshold_user = str(ae_hyperparameter['min_threshold']) maximum_threshold_user = str(ae_hyperparameter['max_threshold']) train_data=train_data.values test_data=test_data.values autoencoder = AeDetector(train_data,test_data,units,latent_units,activation) opt=ae_hyperparameter['optimizer'] loss_fn=ae_hyperparameter["loss"] # loss_fn='binary_crossentropy' epochs=int(ae_hyperparameter['epochs']) batch_size=int(ae_hyperparameter['batch_size']) learning_rate=float(ae_hyperparameter['learning_rate']) # autoencoder.save('../output/autoenc',save_format='tf') # cwd = os.path.abspath(os.path.dirname(__file__)) cwd=self.deployLocation try: artifact_dir=os.path.normpath(os.path.join(cwd,'output','autoenc_artifact_dir')) if not os.path.isdir(artifact_dir): os.makedirs(artifact_dir) except Exception as e: self.log.info("<---- artifact_dir path error. Error Msg: ---->"+str(e)) es,mc=self.callbacks(filename = artifact_dir, patience = 5, monitor = "val_loss") # es,mc=self.callbacks(filename = "../output/autoenc.sav", patience = 3, monitor = "val_loss") self.model_compile(autoencoder,learning_rate, loss_fn, opt) # autoencoder.compile(optimizer='adam', loss='mae') autoencoder.fit(train_data, train_data, epochs = epochs, batch_size=batch_size, validation_data=(test_data, test_data),callbacks = [mc, es]) reconstructed = autoencoder(train_data) train_mae_loss = tf.keras.losses.mae(reconstructed, train_data) #threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) #min_threshold = np.mean(train_mae_loss)- 2*np.std(train_mae_loss) ## Task 20731 if ((minimum_threshold_user and minimum_threshold_user.strip()) and (maximum_threshold_user and maximum_threshold_user.strip())): threshold = float(maximum_threshold_user) min_threshold = float(minimum_threshold_user) elif ((minimum_threshold_user.strip()) and (not maximum_threshold_user.strip())): threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) min_threshold = float(minimum_threshold_user) elif ((not minimum_threshold_user.strip()) and (maximum_threshold_user.strip())): threshold = float(maximum_threshold_user) min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) else: threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) # threshold = np.mean(train_mae_loss) + np.std(train_mae_loss) self.log.info("Anomaly Upper Threshold value based on loss fn (MAE): "+str(threshold)) self.log.info("Anomaly lower_threshold value based on loss fn (MAE): "+str(min_threshold)) test_labels=None ## No test labels passed pred,test_score_df,actual_data,anomaly_info_df = self.prediction(autoencoder, test_data, min_threshold,threshold,test_labels,'False',None,None) # normal_prediction_df=(anomaly_info_df[~anomaly_info_df['anomaly_value']]) normal_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==False] anomaly_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==True] self.log.info("<---- Autoencoder 
(non timeseries) based anomaly detection, anomalies in data: ---->"+str(anomaly_prediction_df)) self.log.info("<---- Number of anomalies in data: ---->"+str(len(anomaly_prediction_df))) self.save_anomalyvalues(anomaly_prediction_df,'anomaly_dataframe') # combined_df=pd.concat([anomaly_prediction_df,normal_prediction_df],ignore_index=True) self.log.info("<---- Autoencoder (non timeseries) based anomaly detection, overall data (both anomaly and non-anomaly ) in data: ---->"+str(anomaly_info_df)) # self.save_anomalyvalues(combined_df,'overall_dataframe') ## If categorical features in original df, then inverse transform the values. try: ##anomaly_info_df,total dataframe. self.naming_anomalyvalues(anomaly_info_df) except Exception as e: self.log.info("anomaly_info_df exception err msg: \n"+str(e)) ##Now we are storing anomaly log (as dataframe) based on two options: 1. Anomalies based on all features, 2. Anomalies based on each individual feature. if (mv_unique_feature_ad.lower()=='true'): ## Multivariate and saving individual feature based anomalies self.save_anomalyvalues(anomaly_prediction_df,(str(feature_name)+'_anomaly_dataframe')) try: anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df) except Exception as e: #If merge fails, just out!. self.log.info("Anomaly Detection Merge df exception :\n"+str(e)) finally: #check merging done or not, to be imp. pass self.save_anomalyvalues(anomaly_info_df,(str(feature_name)+'_overall_dataframe')) ## Save actual test data actual_data #self.save_anomalyvalues(actual_data,(str(feature_name)+'_testdata')) else: self.save_anomalyvalues(anomaly_prediction_df,'anomaly_dataframe') try: anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df) except Exception as e: #If merge fails, just out!. self.log.info("Anomaly Detection Merge df exception :\n"+str(e)) finally: #check merging done or not, to be imp. pass self.save_anomalyvalues(anomaly_info_df,'overall_dataframe') #Ordered data # self.save_anomalyvalues(combined_df,'ts_overall_dataframe_ordered') ## If we want anomaly dataframe with loss and threshold for each values (rows),please uncomment below line # self.save_anomalyvalues(anomal_loss_threshold,'ts_anomaly_dataframe_lt'), ## Save actual test data test_score_df #self.save_anomalyvalues(actual_data,'testdata') self.log.info("<---- Autoencoder non time series / supervised problem anomalies : ---->"+str(anomaly_prediction_df)) #ploting df_subset = anomaly_prediction_df fig, ax = plt.subplots() df.plot(legend=False, ax=ax) df_subset.plot(legend=False, ax=ax, color="r") ax.set_title("Anomaly Data Plot") ax.set_xlabel("DateTime") ax.set_ylabel("Values") plot_name='anomalyplot.png' try: plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot')) if not os.path.isdir(plot_dir): os.makedirs(plot_dir) plotpath=str(plot_dir)+'/'+plot_name except Exception as e: self.log.info("<---- plot_dir path error. 
Error Msg: ---->"+str(e)) if os.path.exists(plotpath): os.remove(plotpath) plt.savefig(plotpath) # plt.savefig(str(plot_dir)+'/'+plot_name) plt.clf() plt.cla() plt.close() else: y=df[target] X=df.drop(target, axis=1) train_data,test_data,train_labels,test_labels=train_test_split(X,y,test_size=0.2,random_state=42) count_classes = pd.value_counts(df[target], sort = True) num_of_classes= len(count_classes) self.log.info("train_data info: \n"+str(train_data.info())) if (num_of_classes >= 2): # scaler = StandardScaler() # train_data = scaler.fit_transform(train_data) # test_data = scaler.fit_transform(test_data) # self.saveScaler(scaler) train_labels = train_labels.astype(bool) test_labels = test_labels.astype(bool) n_train_data = train_data[train_labels] n_test_data = test_data[test_labels] # data1=scaler.inverse_transform(n_test_data) n_test_data_actual=pd.DataFrame(n_test_data) ##anomaly data an_train_data = train_data[~train_labels] an_test_data = test_data[~test_labels] n_train_data = train_data[train_labels] n_test_data = test_data[test_labels] ae_hyperparameter=self.paramSpace # autoencoder = AeDetector(n_train_data,n_test_data) activation=ae_hyperparameter['activation'] units=int(ae_hyperparameter['hidden_units']) latent_units=int(ae_hyperparameter['latentspace_size']) ##For task 20731 minimum_threshold_user = str(ae_hyperparameter['min_threshold']) maximum_threshold_user = str(ae_hyperparameter['max_threshold']) autoencoder = AeDetector(n_train_data,n_test_data,units,latent_units,activation) opt=ae_hyperparameter['optimizer'] loss_fn=ae_hyperparameter["loss"] batch_size=int(ae_hyperparameter['batch_size']) # loss_fn='binary_crossentropy' epochs=int(ae_hyperparameter['epochs']) learning_rate=float(ae_hyperparameter['learning_rate']) cwd=self.deployLocation try: artifact_dir=os.path.normpath(os.path.join(cwd,'output','autoenc_artifact_dir')) if not os.path.isdir(artifact_dir): os.makedirs(artifact_dir) except Exception as e: self.log.info("<---- artifact_dir path error. 
Error Msg: ---->"+str(e)) es,mc=self.callbacks(filename = artifact_dir, patience = 5, monitor = "val_loss") self.model_compile(autoencoder,learning_rate, loss_fn, opt) # autoencoder.compile(optimizer='adam', loss='mae') autoencoder.fit(n_train_data, n_train_data, epochs = epochs, batch_size=batch_size, validation_data=(n_test_data, n_test_data),callbacks = [mc, es]) model_info=self.summary(autoencoder) self.log.info("<---- Auto encoder anomaly detection model information: ---->"+str(model_info)) # reconstructed = autoencoder(n_train_data) reconstructed = autoencoder.predict(n_train_data) #threshold = self.find_threshold(autoencoder, n_train_data) train_mae_loss = tf.keras.losses.mae(reconstructed, n_train_data) pred=tf.math.less(train_mae_loss, threshold) ## Task 20731 if ((minimum_threshold_user and minimum_threshold_user.strip()) and (maximum_threshold_user and maximum_threshold_user.strip())): threshold = float(maximum_threshold_user) min_threshold = float(minimum_threshold_user) elif ((minimum_threshold_user.strip()) and (not maximum_threshold_user.strip())): threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) min_threshold = float(minimum_threshold_user) elif ((not minimum_threshold_user.strip()) and (maximum_threshold_user.strip())): threshold = float(maximum_threshold_user) min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) else: threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) self.log.info("Anomaly threshold max value based on loss fn (MAE): "+str(threshold)) self.log.info("Anomaly threshold min value based on loss fn (MAE): "+str(min_threshold)) pred,test_score_df,actual_data,anomaly_info_df = self.prediction(autoencoder, n_test_data, min_threshold,threshold,test_labels,'False',None,None) # normal_prediction_df=(anomaly_info_df[~anomaly_info_df['anomaly_value']]) normal_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==False] # normal_prediction_df.to_csv('normal_prediction_df.csv') # anomaly_prediction_df=(anomaly_info_df[anomaly_info_df['anomaly_value']]) anomaly_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==True] self.log.info("<---- Autoencoder (non timeseries) based anomaly detection, overall data (both anomaly and non-anomaly ) in data: ---->"+str(anomaly_info_df)) # self.save_anomalyvalues(combined_df,'overall_dataframe') ## If categorical features in original df, then inverse transform the values. try: ##anomaly_info_df,total dataframe. self.naming_anomalyvalues(anomaly_info_df) except Exception as e: self.log.info("anomaly_info_df exception err msg: \n"+str(e)) ##Now we are storing anomaly log (as dataframe) based on two options: 1. Anomalies based on all features, 2. Anomalies based on each individual feature. if (mv_unique_feature_ad.lower()=='true'): ## Multivariate and saving individual feature based anomalies self.save_anomalyvalues(anomaly_prediction_df,(str(feature_name)+'_anomaly_dataframe')) try: anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df) except Exception as e: #If merge fails, just out!. self.log.info("Anomaly Detection Merge df exception :\n"+str(e)) finally: #check merging done or not, to be imp. 
pass self.save_anomalyvalues(anomaly_info_df,(str(feature_name)+'_overall_dataframe')) ## Save actual test data actual_data #self.save_anomalyvalues(actual_data,(str(feature_name)+'_testdata')) else: self.save_anomalyvalues(anomaly_prediction_df,'anomaly_dataframe') try: anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df) except Exception as e: #If merge fails, just out!. self.log.info("Anomaly Detection Merge df exception :\n"+str(e)) finally: #check merging done or not, to be imp. pass self.save_anomalyvalues(anomaly_info_df,'overall_dataframe') ## Save actual test data test_score_df #self.save_anomalyvalues(actual_data,'testdata') self.log.info("<----Number of anomalies in data: ---->"+str(len(anomaly_prediction_df))) """ Plot to show case anomalies, now commented, for testing purpose uncomment and check visually anomalies. """ #ploting df_subset = anomaly_prediction_df fig, ax = plt.subplots() df.plot(legend=False, ax=ax) df_subset.plot(legend=False, ax=ax, color="r") # plt.show() ax.set_title("Anomaly Data Plot") ax.set_xlabel("DateTime") ax.set_ylabel("Values") plot_name='anomalyplot.png' try: plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot')) if not os.path.isdir(plot_dir): os.makedirs(plot_dir) plotpath=str(plot_dir)+'/'+plot_name except Exception as e: self.log.info("<---- plot_dir path error. Error Msg: ---->"+str(e)) if os.path.exists(plotpath): os.remove(plotpath) plt.savefig(plotpath) # plt.savefig(str(plot_dir)+'/'+plot_name) plt.clf() plt.cla() plt.close() else: self.log.info("<---- Check dataset and basic configurations. ---->") except Exception as e: self.log.info("<---- Non time series anomaly detection error msg: ---->"+str(e)) self.log.info("<---- Non time series anomaly detection error msg (detailed): ---->"+str(traceback.format_exc())) return autoencoder,anomaly_prediction_df,anomaly_info_df ## Hyperparameter tuning autoencoders, not implemented def hyperparamtuning_AE(self): try: self.log.info ("autoencoder hyperparam tuning.not implemented.") except Exception as e: self.log.info("autoencoder hyperparam tuning error: "+str(e)) pass ## randomsearch for dbscan def hyperparamtuning_dbscan(self,model,tuner,Parameter_Trials,data): params=model.get_params().keys() try: labels = model.labels_ #df = pd.DataFrame(labels) try: scorer = metrics.silhouette_score(data, labels) except: pass if (tuner.lower() == 'randomsearch'): # Parameters to try cluster_labels = model.labels_ Random_Search = RandomizedSearchCV(model, Parameter_Trials, n_iter=50,cv=5, scoring='adjusted_rand_score', refit=True, n_jobs=1, verbose=5) RandomSearchResults=Random_Search.fit(data) # Fetching the best hyperparameters best_params=RandomSearchResults.best_params_ # All the parameter combinations tried by RandomizedSearchCV RandomSearchResults.cv_results_['params'] except Exception as e: self.log.info("<---- dbscan hpt error msg: ---->"+str(e)) self.log.info("<---- dbscan hpt error msg (detailed): ---->"+str(traceback.format_exc())) return best_params ## Reading aion postprocess data from target->AION_usecaseNo->data->postprocess data def read_inputdata(self): cwd=self.deployLocation try: in_path=os.path.normpath(os.path.join(cwd,'data')) if not os.path.isdir(in_path): self.log.info("<---- Anomaly detection target data folder not available.--->\n") postprocesseddata=os.path.normpath(os.path.join(cwd,'data','postprocesseddata.csv')) postprocessed_df=pd.read_csv(postprocesseddata) except Exception as e: self.log.info("<---- Anomaly detection target data folder not available, Reading 
postprocess csv file issue. Error Msg: ---->"+str(e)) return postprocessed_df ## Get original dataframe values using preprocess pipe after output data created. ##get_label_dict fn not used now. Use if preprocess_pipe based transform needed. def get_label_dict(self, pipe): label_dict = {} dict_pipe={} for (comp_name, component) in pipe.transformer_list: if 'labelencoding' in comp_name: i=1 for step in component.steps: key='val'+'_'+str(i) ordinalencoder=step[1] dict_pipe[f'val_{i}']=ordinalencoder # dict_pipe[key].append(ordinalencoder) label_dict.update(dict_pipe) i=i+1 return label_dict else: continue return label_dict ## Decode label features using aion preprocessed_pipe model,not used now. If we need to use preprocess pipe for inverse transform,use below block. def decoder_labeled_features(self,df): import joblib try: cwd=self.deployLocation # in_path=os.path.normpath(os.path.join(cwd,'data')) if not os.path.isdir(in_path): self.log.info("<---- Anomaly detection target model folder not available.--->\n") preprocessed_pipe=os.path.normpath(os.path.join(cwd,'model','preprocess_pipe.pkl')) model = joblib.load(preprocessed_pipe) label_dict = get_label_dict(model) encoder=label_dict.get('val_4') num_cols = orig_data.select_dtypes(include=np.number).columns.tolist() cat_cols = orig_data.select_dtypes(exclude=np.number).columns.tolist() cat_col_actual=[] for col in cat_cols: try: df1=encoder.inverse_transform(df[col]) cat_col_actual.append(col) except: pass df1=pd.DataFrame(data=df1) df1.columns=cat_cols df2=df[num_cols] df_anomalyinfo_col=df['anomaly_value'] df_list = [df2, df1, df_anomalyinfo_col] # List of your dataframes combined_df = pd.concat(df_list, join='outer', axis=1).fillna(0) except: combined_df=None pass return combined_df ## save predicted data and actual data columns. For get back user original data features # def merge_pre_post_dfs(self,out_df=None): cwd=self.deployLocation anomaly_algorithm=str(self.anomalyMethod) try: in_path=os.path.normpath(os.path.join(cwd,'data')) if not os.path.isdir(in_path): self.log.info("<---- Anomaly detection target data folder not available.--->\n") preprocessed_file=os.path.normpath(os.path.join(cwd,'data','preprocesseddata.csv')) preprocessed_df=pd.read_csv(preprocessed_file) ## cat_cols will get categorical col from preprocessed, cat_diff_cols will get common cat col between output df and preprocessed. cat_cols=preprocessed_df.select_dtypes(exclude=np.number).columns.tolist() num_cols = preprocessed_df.select_dtypes(include=np.number).columns.tolist() cat_diff_cols=list(set(cat_cols).intersection(out_df.columns.tolist())) diff_cols=list(set(preprocessed_df.columns).difference(out_df.columns)) if (cat_diff_cols): if (len(preprocessed_df) == len(out_df)): #Drop each categorical col of original data from output df (which have numerical converted values). 
So, in merging can be done on perfect columns try: ## get common categorical col name between actual and output df for col in cat_diff_cols : out_df.drop(col,axis=1,inplace=True) except: self.log.info("drop col not possible, pass the step.") #Just continue pass diff_cols=list(set(preprocessed_df.columns).difference(out_df.columns)) try: ## Check any datetime column in output df and preprocesseddata import pandas.api.types as ptypes outdf_dt_index_check=ptypes.is_datetime64_dtype(out_df.index) #Is output df have datetime col if (outdf_dt_index_check): if ((self.datetimeFeature.lower() !='na' and self.datetimeFeature)): try: preprocessed_df[self.datetimeFeature] = pd.to_datetime(preprocessed_df[self.datetimeFeature]) preprocessed_df.set_index(self.datetimeFeature, inplace=True) except Exception as e: self.log.info("Given data not contain datetime specified."+str(traceback.format_exc())) ## Below step ,making datetime index to date time column. for merging and droping purpose. preprocessed_df.reset_index(inplace=True) preprocessed_df.rename(columns={"index":self.datetimeFeature},inplace=True) out_df.reset_index(inplace=True) out_df.rename(columns={"index":self.datetimeFeature},inplace=True) else: ## If no datetime column, we need to keep both dataframe index columns as unique. so making them as int index. preprocessed_df.reset_index(inplace=True, drop=True) out_df.reset_index(inplace=True, drop=True) pass ## below part is to get status of index columns type (datetime,int or str), commented now. If needed for debug,pls use. # dt_index_check=ptypes.is_datetime64_dtype(out_df.index) # int_index_check=ptypes.is_numeric_dtype(out_df.index) # str_index_check=ptypes.is_string_dtype(out_df.index) ## Get common column between preprocess and output df try: if (anomaly_algorithm.lower() == 'autoencoder'): common_cols=out_df.drop(['loss','max_threshold','min_threshold','anomaly_value'],axis=1) common_cols.drop(common_cols.filter(regex="Unname"),axis=1, inplace=True) merge_on_cols=common_cols.columns.tolist() combined_df = preprocessed_df.merge(out_df, on=merge_on_cols,how='inner') ## Drop duplicate based on columns except time # drop_duplicate_on=merge_on_cols.extend(cat_diff_cols) drop_duplicate_on=merge_on_cols+cat_diff_cols combined_df = combined_df.drop_duplicates(drop_duplicate_on, keep=False) else: ## otherwise, it is dbscan algorithm common_cols=out_df.drop(['cluster','anomaly_value'],axis=1) common_cols.drop(common_cols.filter(regex="Unname"),axis=1, inplace=True) merge_on_cols=common_cols.columns.tolist() combined_df = preprocessed_df.merge(out_df, on=merge_on_cols,how='inner') ## Drop duplicate based on columns except time # drop_duplicate_on=merge_on_cols+cat_diff_cols combined_df = combined_df.drop_duplicates(merge_on_cols, keep='last') except: combined_df=out_df pass ## Just for reference, in future if you want different merge/join option # combined_df = pd.merge(preprocessed_df[diff_cols],out_df, left_index=True, right_index=True, how='inner') except Exception as e: self.log.info("<---- merge error msg : ---->"+str(e)) self.log.info("<---- merge error msg (detailed): ---->"+str(traceback.format_exc())) pass ## if both data frame have different columns (preprocessed and outdf) else: self.log.info("User data is preprocessed and data cleaning happened.So, actual data and processed data length mismatch. 
So,data records range may vary.") try: # combined_df=self.decoder_labeled_features(out_df) combined_df = preprocessed_df.merge(out_df,on=num_cols,how='inner') # combined_df = combined_df.drop_duplicates(cat_cols, keep='last') combined_df = combined_df.drop_duplicates(num_cols, keep='last') except: ## If nothing in merge works,then make outdf as final dataframe. try: ## If above merge fails, change drop_duplicate hyperparam keep='last' last appearance of key occurance. combined_df = preprocessed_df.merge(out_df,on=num_cols,how='inner') combined_df = combined_df.drop_duplicates(cat_cols, keep=False) except: #If nothing is works, just keep out_df as combined df combined_df=out_df ## if no common categorical col found between preprocessed and outdf. else: ## If merge not works,then make outdf as final dataframe. if (len(cat_cols) > 0): try: combined_df = preprocessed_df.merge(out_df,on=num_cols,how='inner') combined_df = combined_df.drop_duplicates(cat_cols, keep='last') except: #make safe for return combined_df=out_df else: ##If no categorical features available combined_df = preprocessed_df.merge(out_df,on=num_cols,how='inner') combined_df = combined_df.drop_duplicates(num_cols, keep=False) except Exception as e: self.log.info("<---- Anomaly detection target data folder not available, dataframe merging issue. Error Msg: ---->"+str(e)) self.log.info("making output df as final merged data, no categorical column found in output anomaly data. It is user responsibility to check the anomaly data.") #make safe for return combined_df=out_df return combined_df ## for module reusability, this below naming fn created. def naming_anomalyvalues(self,df): replace_values_T='|'.join(['TRUE','True','true']) replace_values_F='|'.join(['FALSE','False','false']) try: df['anomaly_value']=df['anomaly_value'].astype(str).replace(replace_values_T,'AnomalyDataPoint', regex=True) except: df['anomaly_value']=df['anomaly_value'].replace(replace_values_T,'AnomalyDataPoint', regex=True) df['anomaly_value']=df['anomaly_value'].astype(str).replace(replace_values_F,'NormalDataPoint', regex=True) return df ## DBScan based anomaly detection def dbscan_ad(self,data,eps,min_samples,cols): try: tuner='randomsearch' Parameter_Trials={'eps':eps, 'min_samples':min_samples} model = DBSCAN(algorithm='auto') hist = model.fit(data) pred = model.fit_predict(data) best_params = self.hyperparamtuning_dbscan(model,tuner,Parameter_Trials,data) self.log.info("<---- Best hyper parameters for dbscan: ---->"+str(best_params)) best_eps=best_params['eps'] best_min_samples=best_params['min_samples'] if (best_min_samples < len(cols)): min_samples=len(cols)+1 if (best_eps < 0.2): best_eps=0.2 self.log.info("best_eps: \n"+str(best_eps)) self.log.info("best_min_samples: \n"+str(best_min_samples)) best_model=DBSCAN(algorithm='auto',eps = best_eps, min_samples = best_min_samples) hist = best_model.fit(data) pred = best_model.fit_predict(data) best_labels=best_model.labels_ cluster_name = ["Cluster"+str(i) for i in set(best_labels)] # outliers = data[best_model.labels_ == -1] outlier_df = data.copy() outlier_df.loc[:,'cluster'] = best_model.labels_ outliers_final=outlier_df[outlier_df['cluster']==-1] outliers_final['anomaly_value']=outliers_final['cluster']==-1 normaldata= outlier_df[outlier_df['cluster']!=-1] self.log.info("<---- DBScan: Anomalies in data: ---->"+str(outliers_final)) self.log.info("<---- DBScan: Number of anomalies in data: ---->"+str(len(outliers_final))) # num_cat_features=len(self.cat_cols) try: 
self.save_anomalyvalues(outliers_final,'dbscan_anomaly_dataframe') self.save_anomalyvalues(normaldata,'dbscan_normaldata_dataframe') outlier_df['anomaly_value']=outlier_df['cluster']==-1 outlier_df=self.naming_anomalyvalues(outlier_df) ##Convert results to original input data form for end user ease of understanding try: outlier_df=self.merge_pre_post_dfs(outlier_df) except Exception as e: self.log.info("Anomaly Detection Merge df exception:\n"+str(e)) #If merge fails, just out! pass self.save_anomalyvalues(outlier_df,'dbscan_overall_dataframe') except Exception as e: self.log.info("DBScan inversetransform err. msg: \n"+str(e)) no_clusters = len(set(best_labels)) - (1 if -1 in best_labels else 0) self.log.info("<---- DBScan: No of clusters: ---->"+str(no_clusters)) n_noise_ = list(best_labels).count(-1) ## Ploting the dbscan clusters plot_name='dbscan_anomalyplot.png' fig, ax = plt.subplots() ax.set_title("DBScan Clusters") ax.scatter(data.iloc[:, 0], data.iloc[:, 1], c=best_labels) outliers_plot = data[best_model.labels_ == -1] ax.scatter(outliers_plot.iloc[:, 0], outliers_plot.iloc[:, 1], c='red') cwd=self.deployLocation try: plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot')) if not os.path.isdir(plot_dir): os.makedirs(plot_dir) plotpath=str(plot_dir)+'/'+plot_name except Exception as e: self.log.info("<---- plot_dir path error. Error Msg: ---->"+str(e)) if os.path.exists(plotpath): os.remove(plotpath) plt.savefig(plotpath) plt.clf() plt.cla() plt.close() except Exception as e: self.log.info("<---- dbscan error msg: ---->"+str(e)) self.log.info("<---- dbscan error msg (detailed): ---->"+str(traceback.format_exc())) return best_model,outliers_final ## Inverse transform fn for categorical data def inverse_transform(self,df,cat_cols,le_model): df_new=pd.DataFrame() df_new.index=df.index df_reset_index=df.reset_index(drop=True) for col in cat_cols: df_reset_index[col] = le_model.inverse_transform(df_reset_index[col].astype(int)) df_reset_index.index=df_new.index df=df_reset_index return df ##If data comes without going via aion data profiler, we can use this below preprcessing fn () ##Preprocess fn for categorical data , not used now. def preprocessfn_categorical(self,df): try: cat_cols=self.cat_cols preprocessed_df=None le=preprocessing.LabelEncoder() self.le_model=le label_encoded_df = df.copy() for col in cat_cols: label_encoded_df[col]=le.fit_transform(label_encoded_df[col]) except Exception as e: self.log.info("preprocessfn_categorical error traceback."+str(traceback.format_exc())) return label_encoded_df,cat_cols ## Design pattern: Factory,Adapter. Detect antoencoder object or dbscan object based on input params. The interface can be used for anyother extention. Not created any abstract class. ##Main autoencoder based anomaly detection function, from here, sub modules will be called. 
def mainAnomalyDetectionfn(self): df=self.df ## reading post processed data from target->usecase->data directory # df=self.read_inputdata() ## Below line overwrite incoming df with postprocesseddata self.log.info("<----------- In autoencoder based anomaly detection algorithm main process module, the incoming dataframe information as below: \n") buf_info=self.get_df_info(df) self.log.info(buf_info) model_location=None time_series_data=None # mv_unique_feature_ad='' ae_hyperparameter=self.paramSpace # self.log.info("mainAnomalyDetectionfn df: \n"+str(df)) self.log.info("paramSpace in mainano: \n"+str(self.paramSpace)) target=self.target anomaly_algorithm=str(self.anomalyMethod) mv_unique_feature_ad=self.mv_featurebased_ad_status # df=self.dataload(datapath) df.drop(df.filter(regex="Unname"),axis=1, inplace=True) df.drop(df.filter(regex="truncated"),axis=1, inplace=True) # cols2remove=df.columns[df.columns.str.startswith('Unname')] # df.drop(cols2remove,axis=1,inplace=True) # df.to_csv("C:\Users\jayaramakrishnans\AppData\Local\Programs\HCLTech\AION\data\target\actual_df.csv") datetime_column=self.datetimeFeature len_dtf=len(self.datetimeFeature) ##create datetime pattern source py file in target dir. self.create_datetime_pyfile() # cat_cols=None if (self.datetimeFeature.lower() == 'na' or self.datetimeFeature==''): len_dtf=0 if (len_dtf >= 1 ): time_series_data="True" else: time_series_data="False" self.datetimeFeature='' try: if (target != ''): if (anomaly_algorithm.lower() == 'autoencoder'): self.log.info("Supervised anomaly detection started.\n") """ Below part for supervised time series anomaly detection.If timeseries anomaly (supervised) used in future, please uncomemnt and use below code snippet. """ # if (ae_hyperparameter['time_series_data'].lower() == 'true'): # print("Given data is time series data and supervised learning category, because it target is labeled one.") # datetime_column=str(self.datetimeFeature) # time_steps=int(ae_hyperparameter['time_steps']) # test_size_perc=int(ae_hyperparameter['test_size_perc']) # df[datetime_column] = pd.to_datetime(df[datetime_column]) # df.set_index(datetime_column, inplace=True) # self.aionAEAnomalyTS(df,test_size_perc,target,time_steps) ## Removing datetime feature for supervised autoencoder (currently timeseries anomaly not supported in supervised anomaly detection autoencoder) test_size_perc=self.testSize*100 df=df.dropna() model,anomaly_prediction_df,combined_df = self.ae_nontimeseriesmodelfn(df,target) # print("*** End of Autoencoder based non time series Anomaly detection. *** \n") self.log.info("*** End of Autoencoder based non time series Anomaly detection. ***") features=df.columns if (len(features)== 1): # print("Problem type is Univariate time series anomaly detection.\n") self.log.info("Problem type is Univariate time series anomaly detection.\n") test_size_perc=self.testSize*100 df=df.dropna() model,anomaly_prediction_df,combined_df = self.ae_nontimeseriesmodelfn(df,target) elif (len(features) > 1): df.drop(df.filter(regex="Unname"),axis=1, inplace=True) test_size_perc=self.testSize*100 df=df.dropna() model,anomaly_prediction_df,combined_df = self.ae_nontimeseriesmodelfn(df,target) if (mv_unique_feature_ad.lower()=='true'): self.log.info("\n\n *** Below is the anomaly values based on each feature of multivariate time series data. 
***") df.drop(df.filter(regex="Unname"),axis=1, inplace=True) multivariate_cols= df.columns.values.tolist() for col in multivariate_cols: col=str(col) ## creating dataframe for one of feature in multivariate dataset. multivariate_col_df = df[col].to_frame() model,anomaly_prediction_df,combined_df = self.ae_nontimeseriesmodelfn(multivariate_col_df,target) else: if (anomaly_algorithm.lower() == 'dbscan'): self.log.info("DBScan algorithm not supported for supervised training. \n") else: try: self.log.info("Unsupervised anomaly detection started....\n") if (anomaly_algorithm.lower() == 'autoencoder'): test_size_perc=self.testSize*100 if (time_series_data.lower() == 'true'): mv_unique_feature_ad=self.mv_featurebased_ad_status dropout=float(ae_hyperparameter['dropout']) datetime_column=str(self.datetimeFeature) time_steps=int(ae_hyperparameter['time_steps']) target=None features=df.columns if (len(features)== 1): self.log.info("Problem type is Univariate time series anomaly detection.\n") model,anomaly_prediction_df,combined_df = self.aionAEAnomalyTS(df,test_size_perc,target,time_steps,dropout,'False') elif (len(features) > 1): df.drop(df.filter(regex="Unname"),axis=1, inplace=True) self.log.info("Problem type is Multivariate time series anomaly detection.\n") self.log.info("*** Detecting anomaly in the given multi feature (Multivariate) data. Anomaly values based on all the features passed to the aion anomaly AE algorithm. ***") model,anomaly_prediction_df,combined_df = self.aionAEAnomalyTS(df,test_size_perc,target,time_steps,dropout,'False') if (mv_unique_feature_ad.lower()=='true'): self.log.info("\n\n *** Below is the anomaly values based on each feature of multivariate time series data. ***") df.drop(df.filter(regex="Unname"),axis=1, inplace=True) multivariate_cols= df.columns.values.tolist() # for i in range(1,len(features)): for col in multivariate_cols: col=str(col) ## creating dataframe for one of feature in multivariate dataset. multivariate_col = df[col].to_frame() feature_based_model,anomaly_prediction_df_mv,combined_df = self.aionAEAnomalyTS(multivariate_col,test_size_perc,target,time_steps,dropout,mv_unique_feature_ad) """ Below code snippet is commented, the purpose is AION giving 3 option to user for time series, < Three types: univariate_tsd(single_timeseriesdetection), multivariate_tsd, multivariate_seperate_tsd>, based on that specific sub function called. """ # if (ae_hyperparameter['timeseries_ad_type'].lower() =='univariate_tsad'): ## univariate_tsd # self.aionAEAnomalyTS(df,test_size_perc,target,time_steps,dropout) # elif (ae_hyperparameter['timeseries_ad_type'].lower() =='multivariate_tsad'): ##multivariate_tsd # if (len(features) <=1): # # self.aionAEAnomalyTS(df,test_size_perc,target,time_steps,dropout) # print("Given data looks like univariate data. Cannot apply multivariate. Check data and select appropriate timeseries anomaly detection option.") # else: # self.aionAEAnomalyTS(df,test_size_perc,target,time_steps,dropout) # elif (ae_hyperparameter['timeseries_ad_type'].lower() =='multivariate_featurebased_tsad'): ## multivariate_seperate_tsd # # features=df.columns # df.drop(df.filter(regex="Unname"),axis=1, inplace=True) # multivariate_cols= df.columns.values.tolist() # print("multivariate_cols: \n",multivariate_cols) # print("multivariate_cols type: \n",type(multivariate_cols)) # if (len(features) <=1): # # self.aionAEAnomalyTS(df,test_size_perc,target,time_steps,dropout) # print("Given data looks like univariate data. 
Cannot use multivariate.") # else: # # for i in range(1,len(features)): # for col in multivariate_cols: # print("processing multivariate feature name: ",col) # col=str(col) # multivariate_col = df[col].to_frame() # print("multivariate_col type: \n",type(multivariate_col)) # self.aionAEAnomalyTS(multivariate_col,test_size_perc,target,time_steps,dropout) # print("*** End of Autoencoder based time series Anomaly detection.*** \n") self.log.info("*** End of Autoencoder based non time series Anomaly detection. ***") else: target='' df=df.dropna() model,anomaly_prediction_df,combined_df = self.ae_nontimeseriesmodelfn(df,target) elif (anomaly_algorithm.lower() == 'dbscan'): # df=df.dropna() self.log.info("*** DBScan algorithm enabled. ***") cols=df.columns dbscan_hyperparameter=self.paramSpace eps = list(dbscan_hyperparameter['eps']) # eps=list(dbscan_hyperparameter['eps']) # min_samples=list(dbscan_hyperparameter['min_samples']) min_samples = list(dbscan_hyperparameter['min_samples']) model,outliers = self.dbscan_ad(df,eps,min_samples,cols) except Exception as e: self.log.info("Unsupervised anomaly detection error msg: "+str(traceback.format_exc())) ##main ae AeDetector except Exception as e: self.log.info("<---- Main fn error msg of anomaly detection for debug purpose: ---->"+str(e)) self.log.info("<---- Main fn error msg of anomaly detection for debug purpose: (detailed): ---->"+str(traceback.format_exc())) return model ## For testing standalone code if __name__ == '__main__': # print ("main function....") target=None df=None hparams=None AEAD=anomalyDetectionAE() datapath,target,ae_hyperparameter = AEAD.mainAnomalyDetectionfn()
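For quick reference, below is a minimal standalone sketch of the DBSCAN noise-point idea used by dbscan_ad above: points assigned cluster label -1 are flagged as anomalies and renamed the same way naming_anomalyvalues does. The toy dataframe and the eps/min_samples values are illustrative assumptions, not AION defaults.

import numpy as np
import pandas as pd
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler

def dbscan_anomalies(df, eps=0.8, min_samples=2):
    # DBSCAN is distance based, so scale the numeric features first
    scaled = StandardScaler().fit_transform(df)
    labels = DBSCAN(eps=eps, min_samples=min_samples).fit_predict(scaled)
    out = df.copy()
    out['cluster'] = labels
    # Noise points (cluster label -1) are treated as anomalies
    out['anomaly_value'] = np.where(labels == -1, 'AnomalyDataPoint', 'NormalDataPoint')
    return out

if __name__ == '__main__':
    toy = pd.DataFrame({'x': [1.0, 1.1, 0.9, 1.2, 9.5],
                        'y': [2.0, 2.1, 1.9, 2.2, 8.7]})
    print(dbscan_anomalies(toy))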
anomalyDetector.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import time import os import sys import logging from sklearn.metrics import accuracy_score, make_scorer from sklearn.model_selection import train_test_split from sklearn.svm import OneClassSVM from sklearn.ensemble import IsolationForest import pickle from sklearn import metrics import numpy as np import pandas as pd from learner.aion_matrix import aion_matrix from learner.parameters import parametersDefine from sklearn.metrics import f1_score from sklearn import model_selection from learner.anomalyDetectionAE import anomalyDetectionAE class anomalyDetector(object): def __init__(self): self.log = logging.getLogger('eion') def startanomalydetector(self,df,target,labelMaps,inlierLabels,learnerJson,model_type,saved_model,anomalyMethod,deployLocation,predicted_data_file,testPercentage,anomalyconfig,datetimeFeature,mv_featurebased_ad_status): try: self.log.info("startanomalydetector.... \n") from io import StringIO buf = StringIO() df.info(buf=buf) #self.log.info(buf.getvalue()) self.log.info("User data info : \n"+str(buf.getvalue())) try: df[datetimeFeature] = pd.to_datetime(df[datetimeFeature]) df.set_index(datetimeFeature, inplace=True) #If still the datetime column exist in feature list, drop it. Because we already made datetime feature as index (datetimeindex) df.drop(datetimeFeature,axis=1,inplace=True) except Exception as e: pass ae_df=df paramObj=parametersDefine() anomalyMethod=anomalyMethod inlierLabels=inlierLabels anomalyDetectionType="" inlierLabelList=inlierLabels.split(",") self.log.info("<---- inlierLabels ---->"+inlierLabels) self.log.info("<---- anomalyMethod ---->"+str(anomalyMethod)) if target != "": self.log.info('Status:- |... AnomalyDetection: Supervised') self.log.info("One class based anomaly Detection by relabeling data to fit one class models") combinedString="" dfStr="" anomalyDetectionType="supervised" if not anomalyMethod.lower() == "autoencoder": ##Added for auto encoder self.log.info("startanomalydetector: df: \n"+str(df)) #task 12627 if labelMaps == {}: for inlierVal in inlierLabelList: inlier=inlierVal dfStr = "x ==" + inlier + " or " combinedString+= dfStr func= combinedString.strip(" or ") else: for inlierVal in inlierLabelList: try: if inlierVal.isnumeric(): inlierVal = int(inlierVal) # inlier=str(labelMaps[inlierVal]) ##Wrongly assigned inlier values to labelMaps dict key. 
inlier = str(inlierVal) dfStr = "x ==" + str(inlier) + " or " combinedString+= dfStr except Exception as e: raise Exception(e) func= combinedString.strip(" or ") labelMaps={'InlierLabel':1,'NonInlierLabel':-1} targetData=df[target] df['anomaly'] = df[target].apply(lambda x: 1 if eval(func) else -1 ) anomtargetData=df['anomaly'] self.log.info("dataframe after relabeling the data") self.log.info(df.head()) self.log.info("target column value counts with inliers and outliers") self.log.info(df['anomaly'].value_counts()) df.drop([target, "anomaly"], axis=1, inplace=True) outliers = anomtargetData[anomtargetData == -1] self.log.info("outliers in data") self.log.info(outliers.shape[0]) self.log.info("outlier fraction") self.log.info(outliers.shape[0]/targetData.shape[0]) if int(testPercentage) != 0: testSize= testPercentage/100 xtrain, xtest, ytrain, ytest = train_test_split(df, anomtargetData, test_size = testSize) else: xtrain =df xtest =df ytrain=anomtargetData ytest =anomtargetData if anomalyMethod.lower() == "isolationforest": modelName="isolationforest" paramSpace=anomalyconfig['modelParams']['IsolationForest'] paramDict =paramObj.paramDefine(paramSpace,'random') ftwo_scorer = make_scorer(accuracy_score) isolation_forest = model_selection.RandomizedSearchCV(IsolationForest(), paramDict, scoring=ftwo_scorer, n_iter=10) mod = isolation_forest.fit(xtrain,ytrain) model = mod.best_estimator_ elif anomalyMethod.lower() == "oneclasssvm": modelName="oneClassSVM" fthree_scorer = make_scorer(accuracy_score) paramSpace=anomalyconfig['modelParams']['oneclassSVM'] paramDict =paramObj.paramDefine(paramSpace,'random') one_class = model_selection.RandomizedSearchCV(OneClassSVM(), paramDict, scoring=fthree_scorer, n_iter=10) mod = one_class.fit(xtrain,ytrain) model = mod.best_estimator_ elif anomalyMethod.lower() == "autoencoder": modelName='autoencoder' testSize=testPercentage/100 self.log.info("Aion Autoencoder anomaly detection started..") paramSpace=anomalyconfig['modelParams']['AutoEncoder'] adae_obj=anomalyDetectionAE(ae_df,paramSpace,deployLocation,target,anomalyMethod,testSize,datetimeFeature,mv_featurebased_ad_status) model=adae_obj.mainAnomalyDetectionfn() self.log.info("Aion Autoencoder anomaly detection completed..") else: self.log.info("IsolationForest, OneClassSVM and autoencoder are supported models") modelName = "" model = "" else: self.log.info('Status:- |... 
AnomalyDetection: Unsupervised') self.log.info("unsupervised anomaly detection") anomalyDetectionType="unsupervised" model=None xtrain =df xtest = df ytrain = pd.DataFrame() if anomalyMethod.lower() == "isolationforest": paramSpace=anomalyconfig['modelParams']['IsolationForest'] paramDict =paramObj.paramDefine(paramSpace,'random') modelName="isolationforest" def scorer_f(estimator, X): return np.mean(estimator.score_samples(X)) isolation_forest = model_selection.RandomizedSearchCV(IsolationForest(), paramDict, scoring=scorer_f, n_iter=10) mod = isolation_forest.fit(xtrain) self.log.info('---------> Best Param: '+str(mod.best_params_)) model = mod.best_estimator_ elif anomalyMethod.lower() == "oneclasssvm": paramSpace=anomalyconfig['modelParams']['oneclassSVM'] paramDict =paramObj.paramDefine(paramSpace,'random') modelName="oneClassSVM" def scorer_f1(estimator, X): return np.mean(estimator.score_samples(X)) one_class = model_selection.RandomizedSearchCV(OneClassSVM(), paramDict, scoring=scorer_f1, n_iter=10) model = one_class.fit(xtrain) self.log.info('---------> Best Param: '+str(model.best_params_)) model = model.best_estimator_ elif anomalyMethod.lower() == "autoencoder": ae_df.drop(ae_df.filter(regex="Unname"),axis=1, inplace=True) modelName='autoencoder' testSize= testPercentage/100 self.log.info("Aion Autoencoder anomaly detection started..") paramSpace=anomalyconfig['modelParams']['AutoEncoder'] adae_obj=anomalyDetectionAE(ae_df,paramSpace,deployLocation,target,anomalyMethod,testSize,datetimeFeature,mv_featurebased_ad_status) model=adae_obj.mainAnomalyDetectionfn() self.log.info("Aion Autoencoder anomaly detection completed..") elif anomalyMethod.lower() == "dbscan": testSize=testPercentage/100 ae_df.drop(ae_df.filter(regex="Unname"),axis=1, inplace=True) modelName='dbscan' self.log.info("Aion DBScan anomaly detection started..") paramSpace=anomalyconfig['modelParams']['DBScan'] adae_obj=anomalyDetectionAE(ae_df,paramSpace,deployLocation,target,anomalyMethod,testSize,datetimeFeature,mv_featurebased_ad_status) model=adae_obj.mainAnomalyDetectionfn() self.log.info("Aion DBScan anomaly detection completed..") else: self.log.info("IsolationForest,OneClassSVM,autoencoder and DBScan are supported models") modelName = "" model = "" self.log.info('Status:- |... AnomalyDetection Algorithm applied: '+modelName) if (anomalyMethod.lower() == "autoencoder" or anomalyMethod.lower() == "dbscan"): if (anomalyMethod.lower() == "autoencoder"): ## Since autoencoder is implemented using tf.keras, saving the model in h5 format. If we save it in .sav format will give 'TensorSliceReader constructor' error. 
saved_model=saved_model.replace('.sav','') filename = os.path.join(deployLocation,'model',saved_model) model.save(filename,save_format="tf") elif (anomalyMethod.lower() == "dbscan"): filename = os.path.join(deployLocation,'model',saved_model) pickle.dump(model, open(filename, 'wb')) matrix='' trainmatrix='' accuracy = 0 else: filename = os.path.join(deployLocation,'model',saved_model) pickle.dump(model, open(filename, 'wb')) loaded_model=pickle.load(open(filename, 'rb')) pred_testData=loaded_model.predict(xtest) pred_trainData = loaded_model.predict(xtrain) pred_trainScore = loaded_model.decision_function(xtrain) self.log.info("<--- predicted values of testdata --->") self.log.info(pred_testData) if anomalyDetectionType == "supervised" : df_predicted = pd.DataFrame() df_predicted['actual'] = ytest df_predicted['predict'] = pred_testData df_predicted.to_csv(predicted_data_file) preds = pred_testData targs = ytest unique_elements_ytest, counts_elements_ytest = np.unique(targs, return_counts=True) unique_elements_pred, counts_elements_pred = np.unique(preds, return_counts=True) ''' for i in range(0,len(unique_elements_ytest)): self.log.info("unique value :" +str(unique_elements_ytest[i]) + " count in input testdata: " + str(counts_elements_ytest[i]) +" count in predicted testdata: " + str(counts_elements_pred[i])) self.log.info("\n") ''' self.log.info("\n======= Performance matrix on Test Data ======") aion_matrixobj = aion_matrix() self.log.info("-------> Test Matrix: ") matrix = aion_matrixobj.getClassificationPerformaceMatrix(targs,preds,labelMaps) self.log.info("-------> Train Matrix: ") trainmatrix = aion_matrixobj.getClassificationPerformaceMatrix(ytrain,pred_trainData,labelMaps) #self.log.info("-------> Confusion Matrix: ") self.log.info(metrics.confusion_matrix(targs,preds)) self.log.info("-------> accuracy for inliers: ") accuracy = metrics.accuracy_score(targs, preds) self.log.info(metrics.accuracy_score(targs, preds)) self.log.info("-------> precision for inliers --->") self.log.info(metrics.precision_score(targs, preds)) self.log.info("-------> recall for inliers ---> ") self.log.info(metrics.recall_score(targs, preds)) self.log.info("-------> f1 for inliers--->") self.log.info(metrics.f1_score(targs, preds)) self.log.info("-------> area under curve (auc) for inliers --->") self.log.info(metrics.roc_auc_score(targs, preds)) self.log.info("-------> precision for outliers --->") self.log.info(1-metrics.precision_score(targs, preds)) self.log.info("-------> recall for outliers ---> ") self.log.info(1-metrics.recall_score(targs, preds)) self.log.info("-------> f1 for outliers--->") self.log.info(1-metrics.f1_score(targs, preds)) self.log.info("======= Performance matrix on Test Data End ======\n") else: df_predicted = xtrain df_predicted['predict'] = pred_trainData df_predicted['score'] = pred_trainScore df_predicted.to_csv(predicted_data_file, index=False) matrix = '' trainmatrix = '' accuracy = 'NA' return modelName,model,matrix,trainmatrix,accuracy,labelMaps except Exception as inst: self.log.info("Error: anomalyDetector failed "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
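As a compact illustration of the supervised branch above (relabel the configured inlier class to 1 and everything else to -1, then tune an outlier model with RandomizedSearchCV and an accuracy scorer), here is a hedged sketch on synthetic data; the feature names, inlier label and parameter grid are assumptions for illustration only.

import numpy as np
import pandas as pd
from sklearn.ensemble import IsolationForest
from sklearn.metrics import accuracy_score, make_scorer
from sklearn.model_selection import RandomizedSearchCV, train_test_split

rng = np.random.default_rng(0)
df = pd.DataFrame(rng.normal(size=(300, 2)), columns=['f1', 'f2'])
fault_idx = df.sample(frac=0.05, random_state=0).index
df.loc[fault_idx, ['f1', 'f2']] += 6.0   # shift a few rows so they are genuine outliers
df['label'] = 'normal'
df.loc[fault_idx, 'label'] = 'fault'

# Relabel: inlier label(s) -> 1, everything else -> -1
y = np.where(df['label'] == 'normal', 1, -1)
X = df.drop(columns=['label'])
xtrain, xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, random_state=0)

search = RandomizedSearchCV(IsolationForest(random_state=0),
                            {'n_estimators': [50, 100, 200],
                             'contamination': [0.01, 0.05, 0.1]},
                            scoring=make_scorer(accuracy_score),
                            n_iter=5, random_state=0)
search.fit(xtrain, ytrain)
print('best params:', search.best_params_)
print('test accuracy:', accuracy_score(ytest, search.best_estimator_.predict(xtest)))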
regressionModel.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' from learner.optimizetechnique import OptimizationTq import warnings from learner.parameters import parametersDefine from learner.defaultAlgos import defaultParams from hyperopt import fmin, tpe, hp, STATUS_OK, Trials import time import logging import os import sys import json from sklearn.svm import SVR from sklearn.linear_model import LinearRegression from sklearn.linear_model import Lasso from sklearn.linear_model import Ridge from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from xgboost import XGBRegressor from lightgbm import LGBMRegressor from catboost import CatBoostRegressor from sklearn.metrics import r2_score from sklearn.metrics import mean_absolute_error,make_scorer from sklearn.metrics import mean_squared_error from learner.aion_matrix import aion_matrix from uncertainties.aionUQ import aionUQ import mlflow class RegressionModel(): def __init__(self,modelList,params,scoreParam,cvSplit,numIter,geneticParam,trainX,trainY,testX,testY,method,deployLocation): self.modelList =modelList self.params =params self.trainX =trainX self.trainY =trainY self.testX = testX self.testY = testY self.method =method self.scoreParam=scoreParam self.cvSplit=cvSplit self.numIter=numIter self.geneticParam=geneticParam self.log = logging.getLogger('eion') self.deployLocation = deployLocation self.uq_x_train = trainX self.uq_x_test = testX self.uq_y_train = trainY self.uq_y_test = testY self.AlgorithmNames={'Linear Regression':'LinearRegression','Lasso':'Lasso','Ridge':'Ridge','Decision Tree':'DecisionTreeRegressor','Random Forest':'RandomForestRegressor','Extreme Gradient Boosting (XGBoost)':'XGBRegressor','Light Gradient Boosting (LightGBM)': 'LGBMRegressor', 'Categorical Boosting (CatBoost)': 'CatBoostRegressor','Bagging (Ensemble)':'BaggingRegressor','Stacking (Ensemble)':'StackingRegressor','Voting (Ensemble)':'VotingRegressor','Neural Architecture Search':'NAS'} self.modelToAlgoNames = {value: key for key, value in self.AlgorithmNames.items()} def logMlflow(self, runName, params, metrices, estimator, algoName=None): with mlflow.start_run(run_name = runName): for k,v in params.items(): mlflow.log_param(k, v) for k,v in metrices.items(): mlflow.log_metric(k, v) if algoName == 'CatBoostRegressor': mlflow.catboost.log_model(estimator, "model") else: mlflow.sklearn.log_model(estimator, "model") model_uri = mlflow.get_artifact_uri("model") """ for some dataset evaluate takes more than 90 min, so commenting till some solution is not found evaluate_data = self.testX.copy() evaluate_data['target'] = self.testY.copy() mlflow.evaluate(model_uri, data=evaluate_data, targets='target', model_type="regressor") del evaluate_data """ def regressionModelling(self,modelOrFeatureBased, code_configure): paramObj=parametersDefine() bestModel='' bestParams={} import sys bestScore=-sys.float_info.max #bugfix 11656 scoredetails = '' self.log.info('\n---------- Regression Model has started ----------') try: self.log.info('Status:- |... 
Search Optimization Method applied: '+self.method) for modelName in self.modelList: objClf = aion_matrix() if modelName in ['Bagging (Ensemble)','Voting (Ensemble)','Stacking (Ensemble)','Neural Architecture Search']: if modelName == 'Bagging (Ensemble)': from ensemble.ensemble_bagging import ensemble_bagging ensemble_bagging_obj = ensemble_bagging(self.params[modelName],self.scoreParam,0,0) estimator,modelParams,score,model = ensemble_bagging_obj.ensemble_bagging__regressor(self.trainX,self.trainY,self.testX,self.testY) if modelName == 'Stacking (Ensemble)': from ensemble.ensemble_stacking import ensemble_stacking ensemble_stacking_obj = ensemble_stacking(self.params[modelName],self.scoreParam) estimator,modelParams,score,model = ensemble_stacking_obj.ensemble_stacking__regressor(self.trainX,self.trainY,self.testX,self.testY,self.modelList) if modelName == 'Voting (Ensemble)': from ensemble.ensemble_voting import ensemble_voting ensemble_voting_obj = ensemble_voting(self.params[modelName],self.scoreParam) estimator,modelParams,score,model = ensemble_voting_obj.ensemble_voting__regressor(self.trainX,self.trainY,self.testX,self.testY,self.modelList) ''' if modelName == 'Neural Architecture Search': from nas.aionNAS import aionNAS objNAS = aionNAS('Regression',self.params[modelName],self.trainX,self.testX,self.trainY,self.testY,self.deployLocation) estimator,modelParams,score,model=objNAS.nasMain(self.scoreParam) ''' if(scoredetails != ''): scoredetails += ',' scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(score)+',"ModelUncertainty":"NA"}' if self.scoreParam == "r2": if score > bestScore: bestScore =score bestModel =model bestParams=modelParams bestEstimator=estimator else: if abs(score) < bestScore or bestScore == -sys.float_info.max: bestScore =abs(score) bestModel =model bestParams=modelParams bestEstimator=estimator self.log.info('Status:- |... ML Algorithm applied: '+modelName) self.log.info('Status:- |... 
Score: '+objClf.get_print_score(self.scoreParam)+'='+str(round(score,2))+'\n') continue if modelName not in self.params: continue paramSpace=self.params[modelName].copy() algoName = self.AlgorithmNames[modelName] paramDict =paramObj.paramDefine(paramSpace,self.method) if self.method == 'bayesopt': code_configure.add_model(algoName,paramSpace) else: paramDictCopy = paramDict # numpy array is not json serializable #numpy is already imported but still np.ndarray raise error import numpy as np for key,value in paramDictCopy.items(): if isinstance(value, np.ndarray): paramDictCopy[key] = paramDictCopy[key].tolist() code_configure.add_model(algoName,paramDictCopy) if not self.method == 'bayesopt': paramSize = paramObj.getParamSpaceSize(paramDict) else: paramSize = 0 if (self.method == 'bayesopt' and not paramDict) or (not self.method == 'bayesopt' and paramSize<=1): try: start = time.time() #function call defObj = defaultParams(algoName,paramDict,self.scoreParam,0,0,paramSize) estimator, modelParams, model,score =defObj.startTrainingRegression(self.trainX,self.trainY,self.testX,self.testY) executionTime = time.time() - start self.log.info('---------> Total Execution: ' + str(executionTime) + '\n') if (scoredetails != ''): scoredetails += ',' scoredetails += '{"Model":"' + self.modelToAlgoNames[model] + '","FeatureEngineering":"' + str( modelOrFeatureBased) + '","Score":' + str(score) + ',"ModelUncertainty":"NA"}' if self.scoreParam == "r2": if score > bestScore: bestScore = score bestModel = model bestParams = modelParams bestEstimator = estimator else: if abs(score) < bestScore or bestScore == -sys.float_info.max: bestScore = abs(score) bestModel = model bestParams = modelParams bestEstimator = estimator self.log.info('Status:- |... ML Algorithm applied: ' + modelName) self.log.info('Status:- |... Score: ' + objClf.get_print_score(self.scoreParam) + '=' + str( round(score, 2)) + '\n') except Exception as inst: self.log.info('\n < ---------- Model Execution Failed Start--------->') self.log.info('\n<-------' + modelName + ' Model Execution failed!!!.' 
+ str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) self.log.info('\n < ---------- Model Execution Failed End --------->') continue trainingStatus = 'Success' if self.method =='grid': try: self.log.info("-------> Optimization Method :Grid Search") self.log.info("-------> Model Name: "+str(modelName)) opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY) start = time.time() model,modelParams,score,estimator=opTq.gridSearchOpt() executionTime=time.time() - start if not self.testX.empty: predictedData = estimator.predict(self.testX) if 'neg_mean_squared_error' in self.scoreParam: meanssquatederror = mean_squared_error(self.testY,predictedData) score = meanssquatederror elif 'neg_root_mean_squared_error' in self.scoreParam: rootmeanssquatederror=mean_squared_error(self.testY,predictedData,squared=False) score = rootmeanssquatederror elif 'mae' in self.scoreParam: meanabsoluteerror=mean_absolute_error(self.testY,predictedData) score = meanabsoluteerror elif 'r2' in self.scoreParam: r2score=r2_score(self.testY,predictedData) score = r2score problemName = estimator.__class__.__name__ runName = algoName + '_' + modelOrFeatureBased metrices = {} metrices["score"] = score try: self.logMlflow(runName, modelParams, metrices, estimator,algoName) except Exception as e: self.log.info('\n-----> ML flow error!!!.' + str(e)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) # raise pass uq_jsonobject = '' try: if model in ['XGBRegressor','LGBMRegressor','CatBoostRegressor']: self.log.info('-----> Model Uncertainty Not Supported') else: problemName = estimator.__class__.__name__ uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation) total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,"aionuq") self.log.info("-------> model_confidence: "+str(total_picp_percentage)+str('%')) self.log.info("-------> model_uncertainty: "+str(total_Uncertainty_percentage)+str('%')) except: pass if(scoredetails != ''): scoredetails += ',' scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(abs(score))+',"ModelUncertainty":'+str(json.dumps(uq_jsonobject))+'}' self.log.info('---------> Total Execution: '+str(executionTime)+'\n') if self.scoreParam == "r2": if score > bestScore: bestScore =score bestModel =model bestParams=modelParams bestEstimator=estimator else: if abs(score) < bestScore or bestScore == -sys.float_info.max: bestScore =abs(score) bestModel =model bestParams=modelParams bestEstimator=estimator except Exception as inst: self.log.info('\n < ---------- Model Execution Failed Start--------->') self.log.info('\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst)) self.log.info('\n < ---------- Model Execution Failed End --------->') trainingStatus = 'Error (Exception)' elif self.method == 'random': try: self.log.info("-------> Optimization Method :Random Search") self.log.info("-------> Model Name: "+str(modelName)) opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY) start = time.time() 
model,modelParams,score,estimator=opTq.randomSearchOpt() executionTime=time.time() - start if not self.testX.empty: predictedData = estimator.predict(self.testX) if 'neg_mean_squared_error' in self.scoreParam: meanssquatederror = mean_squared_error(self.testY,predictedData) score = meanssquatederror elif 'neg_root_mean_squared_error' in self.scoreParam: rootmeanssquatederror=mean_squared_error(self.testY,predictedData,squared=False) score = rootmeanssquatederror elif 'mae' in self.scoreParam: meanabsoluteerror=mean_absolute_error(self.testY,predictedData) score = meanabsoluteerror elif 'r2' in self.scoreParam: r2score=r2_score(self.testY,predictedData) score = r2score if self.scoreParam == "r2": if score>bestScore: bestScore =score bestModel =model bestParams=modelParams bestEstimator=estimator else: if abs(score) < bestScore or bestScore == -sys.float_info.max: bestScore =abs(score) bestModel =model bestParams=modelParams bestEstimator=estimator problemName = estimator.__class__.__name__ runName = algoName + '_' + modelOrFeatureBased metrices = {} metrices["score"] = score try: self.logMlflow(runName, modelParams, metrices, estimator,algoName) except Exception as e: self.log.info('\n-----> ML flow error!!!.' + str(e)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) # raise pass uq_jsonobject = '' try: if model in ['XGBRegressor','LGBMRegressor','CatBoostRegressor']: self.log.info('-----> Model Uncertainty Not Supported') else: problemName = estimator.__class__.__name__ uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation) total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,"aionuq") self.log.info("-------> model_confidence: "+str(total_picp_percentage)+str('%')) self.log.info("-------> model_uncertainty: "+str(total_Uncertainty_percentage)+str('%')) except Exception as e: print(e) pass if(scoredetails != ''): scoredetails += ',' scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(abs(score))+',"ModelUncertainty":'+str(json.dumps(uq_jsonobject))+'}' except Exception as inst: self.log.info('\n < ---------- Model Execution Failed Start--------->') self.log.info('\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst)) self.log.info('\n < ---------- Model Execution Failed End --------->') trainingStatus = 'Error (Exception)' elif self.method == 'bayesopt': try: self.log.info("-------> Optimization Method :BayesOpt Search") self.log.info("-------> Model Name: "+str(modelName)) opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY) fun=opTq.f trials = Trials() start = time.time() best = fmin(fun,paramDict,algo=tpe.suggest, max_evals=100, trials=trials) executionTime=time.time() - start results = sorted(trials.results, key = lambda x: x['loss']) bestresult=results[0] model=bestresult['model'] score=bestresult['score'] modelParams=bestresult['params'] res = ', '.join("{!s}={!r}".format(key,val) for (key,val) in modelParams.items()) modelObj=eval(model+'('+res+')') estimator = modelObj.fit(self.trainX,self.trainY) if not self.testX.empty: predictedData = estimator.predict(self.testX) if 'neg_mean_squared_error' in self.scoreParam: meanssquatederror = 
mean_squared_error(self.testY,predictedData) score = meanssquatederror elif 'neg_root_mean_squared_error' in self.scoreParam: rootmeanssquatederror=mean_squared_error(self.testY,predictedData,squared=False) score = rootmeanssquatederror elif 'mae' in self.scoreParam: meanabsoluteerror=mean_absolute_error(self.testY,predictedData) score = meanabsoluteerror elif 'r2' in self.scoreParam: r2score=r2_score(self.testY,predictedData) score = r2score problemName = estimator.__class__.__name__ runName = algoName + '_' + modelOrFeatureBased metrices = {} metrices["score"] = score try: self.logMlflow(runName, modelParams, metrices, estimator,algoName) except Exception as e: self.log.info('\n-----> ML flow error!!!.' + str(e)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) # raise pass if self.scoreParam == "r2": if score>bestScore: bestScore =score bestModel =model bestParams=modelParams bestEstimator=estimator else: if abs(score) < bestScore or bestScore == -sys.float_info.max: bestScore =abs(score) bestModel =model bestParams=modelParams bestEstimator=estimator uq_jsonobject = '' try: if model in ['XGBRegressor','LGBMRegressor','CatBoostRegressor']: self.log.info('-----> Model Uncertainty Not Supported') else: problemName = estimator.__class__.__name__ uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation) total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,"aionuq") self.log.info("-------> model_confidence: "+str(total_picp_percentage)+str('%')) self.log.info("-------> model_uncertainty: "+str(total_Uncertainty_percentage)+str('%')) except: pass if(scoredetails != ''): scoredetails += ',' scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(abs(score))+',"ModelUncertainty":'+str(json.dumps(uq_jsonobject))+'}' self.log.info('---------> Total Execution: '+str(executionTime)+'\n') except Exception as inst: self.log.info('\n < ---------- Model Execution Failed Start--------->') self.log.info('\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst)) self.log.info('\n < ---------- Model Execution Failed End --------->') trainingStatus = 'Error (Exception)' else: trainingStatus = 'Error (HyperTunning Algo Not Supported)' pass self.log.info('Status:- |... ML Algorithm applied: '+modelName) if trainingStatus.lower() == 'success': self.log.info('Status:- |... Score after hyperparameter tuning: '+objClf.get_print_score(self.scoreParam)+'='+str(round(score,2))+'\n') else: self.log.info('Status:- |... Training Error : '+trainingStatus+'\n') if bestModel != 'None': self.log.info('---------- Regression Model End ---------- \n') self.log.info('\n------- Best Model and its parameters -------------') self.log.info('Status:- |... 
Best Algorithm selected: '+str(self.modelToAlgoNames[bestModel])+' Score='+str(round(bestScore,2))) self.log.info("-------> Best Name: "+str(bestModel)) self.log.info("-------> Best Score: "+str(bestScore)) else: raise Exception("Sorry, no model is trained") return self.modelToAlgoNames[bestModel],bestParams,bestScore,bestEstimator,scoredetails except Exception as inst: self.log.info( '\n-----> regressionModel failed!!!.'+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
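The random-search branch above reduces to the pattern sketched below: tune each candidate regressor, re-score it on the held-out test split, and keep the best model according to the configured metric (higher is better for r2, lower absolute error otherwise). This is only a hedged sketch; the dataset, candidate list and parameter grids are assumptions.

import sys
import numpy as np
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_absolute_error, r2_score
from sklearn.model_selection import RandomizedSearchCV, train_test_split

X, y = make_regression(n_samples=400, n_features=8, noise=5.0, random_state=0)
xtrain, xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, random_state=0)

candidates = {
    'Ridge': (Ridge(), {'alpha': np.logspace(-3, 2, 20)}),
    'RandomForestRegressor': (RandomForestRegressor(random_state=0),
                              {'n_estimators': [50, 100], 'max_depth': [None, 5, 10]}),
}
score_param = 'r2'
best_name, best_estimator, best_score = None, None, -sys.float_info.max

for name, (model, space) in candidates.items():
    search = RandomizedSearchCV(model, space, n_iter=5, cv=3, scoring=score_param, random_state=0)
    search.fit(xtrain, ytrain)
    pred = search.best_estimator_.predict(xtest)
    if score_param == 'r2':
        score = r2_score(ytest, pred)           # higher is better
        better = score > best_score
    else:
        score = abs(mean_absolute_error(ytest, pred))   # lower absolute error is better
        better = score < best_score or best_score == -sys.float_info.max
    if better:
        best_name, best_estimator, best_score = name, search.best_estimator_, score

print('best model:', best_name, 'score:', round(best_score, 2))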
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
defaultAlgos.py
import numpy as np # from learner.classificationModel import ClassifierModel from learner.aion_matrix import aion_matrix from sklearn.metrics import r2_score from sklearn.metrics import mean_absolute_error,make_scorer from sklearn.metrics import mean_squared_error class defaultParams(): def __init__(self, modelName, paramDict, scoreParam, MakeFP0, MakeFN0,paramSize): self.modelName = modelName self.paramDict = paramDict self.scoreParam = scoreParam self.MakeFP0 = MakeFP0 self.MakeFN0 = MakeFN0 self.dictsize = paramSize def paramDictConvertion(self): if self.dictsize != 0: for keys in self.paramDict.keys(): self.paramDict[keys] = self.paramDict[keys][0] def startTrainingClassification(self, trainX, trainY, testX, testY): threshold = -1 precisionscore = -1 recallscore = -1 objClf = aion_matrix() self.paramDictConvertion() if self.modelName == 'LogisticRegression': from sklearn import linear_model estimator = linear_model.LogisticRegression() if self.modelName == 'GaussianNB': from sklearn.naive_bayes import GaussianNB estimator = GaussianNB() if self.modelName == 'SVC': from sklearn import svm estimator = svm.SVC() if self.modelName == 'KNeighborsClassifier': from sklearn.neighbors import KNeighborsClassifier estimator = KNeighborsClassifier() if self.modelName == 'DecisionTreeClassifier': from sklearn.tree import DecisionTreeClassifier estimator = DecisionTreeClassifier() if self.modelName == 'RandomForestClassifier': from sklearn.ensemble import RandomForestClassifier estimator = RandomForestClassifier() if self.modelName == 'GradientBoostingClassifier': from sklearn.ensemble import GradientBoostingClassifier estimator = GradientBoostingClassifier() if self.modelName == 'XGBClassifier': import xgboost as xgb estimator = xgb.XGBClassifier() if self.modelName == 'CatBoostClassifier': from catboost import CatBoostClassifier estimator = CatBoostClassifier() if self.modelName == 'LGBMClassifier': from lightgbm import LGBMClassifier estimator = LGBMClassifier() if self.dictsize != 0: estimator.set_params(**self.paramDict) estimator.fit(trainX, trainY) if not testX.empty: predictedData = estimator.predict(testX) score = objClf.get_score(self.scoreParam, testY, predictedData) if self.MakeFP0: self.log.info('-------- Calculate Threshold for FP Start-------') startRange = 0.0 endRange = 1.0 stepsize = 0.01 threshold_range = np.arange(startRange, endRange, stepsize) threshold, precisionscore, recallscore = objClf.check_threshold(estimator, trainX, trainY, threshold_range, 'FP', self.modelName) self.log.info('-------- Calculate Threshold for FP End-------') if self.MakeFN0: self.log.info('-------- Calculate Threshold for FN Start-------') startRange = 1.0 endRange = 0.0 stepsize = -0.01 threshold_range = np.arange(startRange, endRange, stepsize) threshold, precisionscore, recallscore = objClf.check_threshold(estimator, trainX, trainY, threshold_range, 'FN', self.modelName) self.log.info('-------- Calculate Threshold for FN End-------') else: predictedData = estimator.predict(trainX) score = objClf.get_score(self.scoreParam, trainY, predictedData) if self.MakeFP0: self.log.info('-------- Calculate Threshold for FP Start-------') startRange = 0.0 endRange = 1.0 stepsize = 0.01 threshold_range = np.arange(startRange, endRange, stepsize) threshold, precisionscore, recallscore = objClf.check_threshold(estimator, trainX, trainY, threshold_range, 'FP', self.modelName) self.log.info('-------- Calculate Threshold for FP End-------') if self.MakeFN0: self.log.info('-------- Calculate Threshold for FN 
Start-------') startRange = 1.0 endRange = 0.0 stepsize = -0.01 threshold_range = np.arange(startRange, endRange, stepsize) threshold, precisionscore, recallscore = objClf.check_threshold(estimator, trainX, trainY, threshold_range, 'FN', self.modelName) self.log.info('-------- Calculate Threshold for FN End-------') # status, bscore, bthres, brscore, bpscore = objClf.getBestModel(self.MakeFP0,self.MakeFN0, threshold, # bestthreshold, recallscore, bestrecallscore, # precisionscore, bestprecisionscore, score, # bestScore) return estimator, estimator.get_params(), self.modelName, score, threshold, precisionscore, recallscore def startTrainingRegression(self, trainX, trainY, testX, testY): #objClf = aion_matrix() try: score = 0 self.paramDictConvertion() if self.modelName=="LinearRegression": from sklearn import linear_model estimator = linear_model.LinearRegression() if self.modelName=="Lasso": from sklearn import linear_model estimator = linear_model.Lasso() if self.modelName=="Ridge": from sklearn import linear_model estimator = linear_model.Ridge() if self.modelName=="DecisionTreeRegressor": from sklearn.tree import DecisionTreeRegressor estimator = DecisionTreeRegressor() if self.modelName=="RandomForestRegressor": from sklearn.ensemble import RandomForestRegressor estimator = RandomForestRegressor() if self.modelName== "XGBRegressor": import xgboost as xgb estimator = xgb.XGBRegressor() if self.modelName == 'CatBoostRegressor': from catboost import CatBoostRegressor estimator = CatBoostRegressor() if self.modelName == 'LGBMRegressor': from lightgbm import LGBMRegressor estimator = LGBMRegressor() if self.dictsize != 0: estimator.set_params(**self.paramDict) estimator.fit(trainX, trainY) except Exception as e: print(e) if not testX.empty: predictedData = estimator.predict(testX) if 'neg_mean_squared_error' in self.scoreParam: meanssquatederror = mean_squared_error(testY, predictedData) score = meanssquatederror elif 'neg_root_mean_squared_error' in self.scoreParam: rootmeanssquatederror = mean_squared_error(testY, predictedData, squared=False) score = rootmeanssquatederror elif 'mae' in self.scoreParam: meanabsoluteerror = mean_absolute_error(testY, predictedData) score = meanabsoluteerror elif 'r2' in self.scoreParam: r2score = r2_score(testY, predictedData) score = r2score else: predictedData = estimator.predict(trainX) if 'neg_mean_squared_error' in self.scoreParam: meanssquatederror = mean_squared_error(trainY, predictedData) score = meanssquatederror elif 'neg_root_mean_squared_error' in self.scoreParam: rootmeanssquatederror = mean_squared_error(trainY, predictedData, squared=False) score = rootmeanssquatederror elif 'mae' in self.scoreParam: meanabsoluteerror = mean_absolute_error(trainY, predictedData) score = meanabsoluteerror elif 'r2' in self.scoreParam: r2score = r2_score(trainY, predictedData) score = r2score return estimator, estimator.get_params(), self.modelName, score
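Below is a minimal sketch of the default-parameter regression path above: when the parameter space collapses to at most one candidate, the estimator is fitted directly with those values and scored with the configured metric. The dataset and the single-value parameter dictionary are illustrative assumptions.

from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.model_selection import train_test_split

def train_with_defaults(param_dict, score_param, xtrain, ytrain, xtest, ytest):
    # Each value arrives as a single-element list, so unwrap it (paramDictConvertion)
    params = {k: v[0] for k, v in param_dict.items()}
    estimator = RandomForestRegressor(**params)
    estimator.fit(xtrain, ytrain)
    pred = estimator.predict(xtest)
    if 'neg_mean_squared_error' in score_param:
        score = mean_squared_error(ytest, pred)
    elif 'neg_root_mean_squared_error' in score_param:
        score = mean_squared_error(ytest, pred, squared=False)
    elif 'mae' in score_param:
        score = mean_absolute_error(ytest, pred)
    else:  # default to r2
        score = r2_score(ytest, pred)
    return estimator, estimator.get_params(), score

X, y = make_regression(n_samples=300, n_features=6, noise=3.0, random_state=0)
xtr, xte, ytr, yte = train_test_split(X, y, test_size=0.2, random_state=0)
est, params, score = train_with_defaults({'n_estimators': [100], 'max_depth': [8]}, 'r2', xtr, ytr, xte, yte)
print('r2 on test data:', round(score, 3))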
objectDetector.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import sys import json import shutil import tarfile import logging import subprocess from os.path import expanduser import platform from pathlib import Path, PurePosixPath import tensorflow.compat.v2 as tf os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging (1) from google.protobuf import text_format ''' from object_detection import model_lib_v2 from object_detection import model_main_tf2 from object_detection import exporter_lib_v2 from object_detection.utils import config_util from object_detection.protos import pipeline_pb2 ''' from learner.cloudServer import awsGPUTraining class objectDetector(object): def __init__(self, dataLocation, pretrainedModels, modelDirName,remoteTrainingConfig): self.log = logging.getLogger('eion') self.dataLocation = dataLocation self.pretrainedModels = Path(pretrainedModels) self.modelDirName = Path(modelDirName['file']) self.modelURLDict = modelDirName self.gpu = remoteTrainingConfig['Enable'] self.serverConfig = remoteTrainingConfig self.modelOutput = Path(dataLocation).parent/"export" if remoteTrainingConfig['Enable']: ''' if not Path(serverConfigFile).is_file(): raise ValueError("Gpu training is enabled but server config file is not present.") with open(serverConfigFile) as fObj: self.serverConfig = json.load(fObj) ''' self.tfRecordLoc = PurePosixPath('aion/data/od') self.pipelineLoc = PurePosixPath('aion/data/od') self.labelMapLoc = PurePosixPath('aion/data/od') self.gpuPretrainedModelPath = PurePosixPath('aion/pretrainedModels')/self.modelDirName else: self.tfRecordLoc = Path(dataLocation) self.pipelineLoc = Path(dataLocation) self.labelMapLoc = Path(dataLocation) self.gpuPretrainedModelPath = None def prepareConfig(self, detectionModel, num_classes, n_epoch, batch_size): home = expanduser("~") if platform.system() == 'Windows': modelPath = os.path.join(home,'AppData','Local','HCLT','AION','PreTrainedModels','ObjectDetection') else: modelPath = os.path.join(home,'HCLT','AION','PreTrainedModels','ObjectDetection') pipeline_config = str(modelPath/self.modelDirName/"pipeline.config") checkPoint = "ckpt-0" with open(str(modelPath/self.modelDirName/"checkpoint/checkpoint")) as f: line = f.readline() checkPoint = line.split(':')[1].strip()[1:-1] #(model_checkpoint_path: "ckpt-301") to ckpt-301 checkPoint = "checkpoint/"+checkPoint from object_detection.utils import config_util configs = config_util.get_configs_from_pipeline_file(pipeline_config) model_config = configs['model'] if detectionModel.lower() == 'ssd': model_config.ssd.num_classes = num_classes configs['train_config'].fine_tune_checkpoint_type = "detection" elif detectionModel.lower() == 'centernet': model_config.center_net.num_classes = num_classes configs['train_config'].fine_tune_checkpoint_type = "fine_tune" elif detectionModel.lower() == 'fasterrcnn': model_config.faster_rcnn.num_classes = num_classes configs['train_config'].fine_tune_checkpoint_type = "detection" else: raise ValueError("{} Model is not supported for object 
detection.\n".format(detectionModel)) if self.gpu: checkpointPath = str(self.gpuPretrainedModelPath / checkPoint) else: checkpointPath = str(modelPath/self.modelDirName/checkPoint) configs['train_config'].fine_tune_checkpoint = checkpointPath configs['train_config'].num_steps = n_epoch configs['train_config'].batch_size = batch_size configs['train_input_config'].tf_record_input_reader.input_path[:] = [str(self.tfRecordLoc/"train.tfrecord")] configs['train_input_config'].label_map_path = str(self.labelMapLoc/"label_map.pbtxt") configs['eval_input_config'].tf_record_input_reader.input_path[:] = [self.dataLocation + "/test.tfrecord"] configs['eval_input_config'].label_map_path = self.dataLocation + "/label_map.pbtxt" # Save new pipeline config new_pipeline_proto = config_util.create_pipeline_proto_from_configs(configs) config_util.save_pipeline_config(new_pipeline_proto, self.dataLocation) def __exportModel(self): self.log.info('-------> exporting trained Model') from object_detection.protos import pipeline_pb2 from object_detection import exporter_lib_v2 pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() with tf.io.gfile.GFile(str(self.pipelineLoc/"pipeline.config"), 'r') as f: text_format.Merge(f.read(), pipeline_config) text_format.Merge('', pipeline_config) exporter_lib_v2.export_inference_graph( 'image_tensor', pipeline_config, self.dataLocation, str(self.modelOutput)) def startObjectDetector(self): if self.gpu: self.log.info('-------> Training on the cloud machine') self.log.info('Status:- |...Remote Machine Training') with open(self.dataLocation+'\model.config', 'w')as f: json.dump( self.modelURLDict, f) awsGpu = awsGPUTraining(self.serverConfig) try: awsGpu.start_instance() awsGpu.copy_files_to_server(self.dataLocation) awsGpu.start_executing_notebook() self.log.info('-------> Downloading trained model file') tarFile = awsGpu.copy_file_from_server(self.dataLocation) with tarfile.open(tarFile) as tar: tar.extractall(self.dataLocation) awsGpu.stop_server_instance() except: awsGpu.stop_server_instance() raise extractedPath = Path(self.dataLocation)/Path(tarFile).name.split('.')[0] filesList = extractedPath.glob('**/*') for file in filesList: if file.parent == extractedPath: if file.name == "export": shutil.copytree(file, self.modelOutput) elif file.is_dir(): shutil.copytree(file, Path(self.dataLocation)/file.name) else: shutil.copy2(file, self.dataLocation) shutil.rmtree(extractedPath) Path(tarFile).unlink() shutil.copy2(self.dataLocation + "/label_map.pbtxt", str(self.modelOutput)) else: self.log.info('-------> Training on the local machine') self.log.info('Status:- |...Local Machine Training') tf.config.set_soft_device_placement(True) strategy = tf.compat.v2.distribute.MirroredStrategy() with strategy.scope(): try: from object_detection import model_lib_v2 model_lib_v2.train_loop( pipeline_config_path=str(self.pipelineLoc/"pipeline.config"), model_dir=str(self.dataLocation)) except Exception: raise self.__exportModel() shutil.copy2(str(self.labelMapLoc/"label_map.pbtxt"), str(self.modelOutput)) def evaluateObjectDetector(self, model_dir, pipeline_config_dir=None, checkpoint_dir=None): if checkpoint_dir == None: checkpoint_dir = model_dir if pipeline_config_dir == None: pipeline_config_dir = model_dir self.log.info('-------> Evaluation started') from object_detection import model_main_tf2 cmd = '"{}" "{}" --model_dir="{}" --pipeline_config_path="{}/pipeline.config" --checkpoint_dir="{}" --eval_timeout=6'.format(sys.executable, model_main_tf2.__file__, model_dir, model_dir, 
checkpoint_dir) result = subprocess.run(cmd , capture_output=True, text=True,shell=True) precisionParam = ['Average Precision', 'Average Recall'] text = result.stdout.split('\n') stats = {} keys = [] try: for x in text: for y in precisionParam: indx = x.find(y) if indx != -1: keyValue = x[indx:].split(' = ') stats[keyValue[0]] = keyValue[1] keys.append(keyValue[0]) except Exception as e: raise ValueError("Error in evaluation: " + str(e)) self.log.info('-------> Evaluation statistics:') self.log.info(stats) return stats, keys
optimizetechnique.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import SGDClassifier, PassiveAggressiveClassifier from sklearn.linear_model import SGDRegressor, PassiveAggressiveRegressor from sklearn.linear_model import SGDClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import cross_val_score from sklearn.svm import SVC from hyperopt import fmin, tpe, hp, STATUS_OK, Trials from sklearn.svm import SVR import xgboost as xgb from xgboost import XGBClassifier from lightgbm import LGBMClassifier from catboost import CatBoostClassifier from xgboost import XGBRegressor from lightgbm import LGBMRegressor from catboost import CatBoostRegressor from sklearn.linear_model import LinearRegression from sklearn.linear_model import Lasso from sklearn.linear_model import Ridge from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor import warnings warnings.filterwarnings('ignore') import time import logging import sys,os class StreamToLogger(object): def __init__(self, logger, log_level=logging.INFO): self.logger = logger self.log_level = log_level self.linebuf = '' def write(self, buf): for line in buf.rstrip().splitlines(): self.logger.log(self.log_level, 'Model:- Iteration:: '+line.rstrip()) class OptimizationTq(): def __init__(self,modelName,tuneParams,cvSplit,scoreParam,nIter,trainX,trainY,geneticParam=None): self.data = None self.model=modelName self.params =tuneParams self.cvSplit=cvSplit self.scoreParam=scoreParam self.trainX =trainX self.trainY = trainY self.geneticParam=geneticParam if geneticParam else {} self.nIter =nIter self.count =0 self.best =0 self.log = logging.getLogger('eion') def gridSearchOpt(self): try: sl = StreamToLogger(self.log, logging.INFO) oldStdout = sys.stdout sys.stdout = sl self.log.info('Model:-Model Name:: '+str(self.model)) modelObj=eval(self.model+'()') gridOp = GridSearchCV(modelObj, param_grid=self.params,scoring=self.scoreParam, cv=self.cvSplit,verbose=10) gridFit=gridOp.fit(self.trainX,self.trainY) self.log.info('Model:-Model Name:: '+str(self.model)) self.log.info('Model:-ScoringType:: '+str(gridFit.scorer_)) self.log.info('Model:-Best Param:: '+str(gridFit.best_params_)) self.log.info('Model:-Validation Score:: '+str(gridFit.best_score_)) self.log.info('Model:-CV Result:: '+str(gridFit.cv_results_)) self.log.info('Model:-Best Estimator:: '+str(gridFit.best_estimator_)) sys.stdout = oldStdout return self.model,gridFit.best_params_,gridFit.best_score_,gridFit.best_estimator_ except Exception as inst: self.log.info("gridSearchOpt failed ==>"+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = 
os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def randomSearchOpt(self): try: sl = StreamToLogger(self.log, logging.INFO) oldStdout = sys.stdout sys.stdout = sl self.log.info('Model:-Model Name:: '+str(self.model)) modelObj=eval(self.model+'()') randomOp = RandomizedSearchCV(modelObj, param_distributions=self.params,scoring=self.scoreParam,n_iter=self.nIter,cv=self.cvSplit,verbose=10) randomFit=randomOp.fit(self.trainX,self.trainY) self.log.info('Model:-Model Name:: '+str(self.model)) self.log.info('Model:-ScoringType:: '+str(randomFit.scorer_)) self.log.info('Model:-Best Param:: '+str(randomFit.best_params_)) self.log.info('Model:-Validation Score:: '+str(randomFit.best_score_)) self.log.info('Model:-CV Result:: '+str(randomFit.cv_results_)) self.log.info('Model:-Best Estimator:: '+str(randomFit.best_estimator_)) sys.stdout = oldStdout return self.model,randomFit.best_params_,randomFit.best_score_,randomFit.best_estimator_ except Exception as inst: self.log.info("RandomsearchOptimization failed ==>"+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def bayesianOpt(self,params): modelObj=eval(self.model+'(**'+str(params)+')') score=cross_val_score(modelObj, self.trainX, self.trainY,scoring=self.scoreParam,cv=self.cvSplit) return score.mean() def f(self,params): best=self.best count=self.count parameters=params count += 1 classObj=OptimizationTq(self.model,self.params,self.cvSplit,self.scoreParam,self.nIter,self.trainX,self.trainY) acc = classObj.bayesianOpt(parameters.copy()) return {'loss':-acc,'score': acc, 'status': STATUS_OK,'model' :self.model,'params': params}
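The bayesianOpt/f pair above follows the standard hyperopt pattern sketched below: the objective cross-validates a model for each sampled parameter set, fmin minimises the negated CV score with TPE, and the best trial is read back from trials.results. The classifier and search space here are illustrative assumptions.

from hyperopt import STATUS_OK, Trials, fmin, hp, tpe
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

X, y = load_breast_cancer(return_X_y=True)
space = {
    'n_estimators': hp.choice('n_estimators', [50, 100, 200]),
    'max_depth': hp.choice('max_depth', [3, 5, 8, None]),
}

def objective(params):
    # Cross-validate the sampled configuration; hyperopt minimises 'loss'
    model = RandomForestClassifier(random_state=0, **params)
    acc = cross_val_score(model, X, y, scoring='accuracy', cv=3).mean()
    return {'loss': -acc, 'score': acc, 'status': STATUS_OK, 'params': params}

trials = Trials()
fmin(objective, space, algo=tpe.suggest, max_evals=20, trials=trials)
best_trial = sorted(trials.results, key=lambda r: r['loss'])[0]
print('best CV accuracy:', round(best_trial['score'], 4), 'params:', best_trial['params'])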
cloudServer.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import boto3 import json import time import requests import datetime import uuid import shutil from websocket import create_connection from botocore.exceptions import ClientError import tarfile from pathlib import Path, PurePosixPath from stat import S_ISDIR from fabric import Connection import time import logging class awsGPUTraining(): def __init__(self, config): local_config = {"location":{"data":"aion/data/od", "code":"", "pretrainedModel":"aion/pretrainedModels"}, "jupyter":{"header":{"Authorization":"Token f3af05d5348301997fb014f245569e872d27bb9018fd70d2"}, "portNo":"8888", "notebook_path":"aion/code/AWS_GPU_OD_Training.ipynb"}} self.serverConfig = config["server"] self.sshConfig = config["ssh"] self.log = logging.getLogger('eion') self.codeLocation = local_config["location"]["code"] self.dataLocation = local_config["location"]["data"] self.pretrainedModelLocation = local_config["location"]["pretrainedModel"] self.jupyterConfig = local_config["jupyter"] self.serverIP = "" if self.serverConfig["awsAccessKeyId"] == "" or self.serverConfig["awsSecretAccessKey"] == "": raise ValueError("Cloud server configuration is not available.") if len(self.serverConfig["InstanceIds"]) == 0 and self.serverConfig["amiId"] == "": raise ValueError("Please provide either InstanceIds or amiId in server config") self.instanceId = [] self.separate_instance = False if self.serverConfig["amiId"] != "": self.separate_instance = True else: if len(self.serverConfig["InstanceIds"]): if isinstance(self.serverConfig["InstanceIds"], list): self.instanceId = self.serverConfig["InstanceIds"] elif isinstance(self.serverConfig["InstanceIds"], str): self.instanceId = [self.serverConfig["InstanceIds"]] self.ec2_client = boto3.client(self.serverConfig["serverName"], region_name=self.serverConfig["regionName"], aws_access_key_id=self.serverConfig["awsAccessKeyId"], aws_secret_access_key=self.serverConfig["awsSecretAccessKey"]) def __sftp_exists(self, sftp, path): try: sftp.stat(path) return True except:# IOError, e: #if e.errno == errno.ENOENT: return False def __rmtree(self, sftp, remotepath, level=0): for f in sftp.listdir_attr(remotepath): rpath = str(PurePosixPath(remotepath)/f.filename) if S_ISDIR(f.st_mode): self.__rmtree(sftp, rpath, level=(level + 1)) sftp.rmdir(rpath) else: rpath = str(PurePosixPath(remotepath)/f.filename) sftp.remove(rpath) def copy_files_to_server(self, location): try: client = Connection( host=self.serverIP, user=self.sshConfig["userName"], connect_kwargs={ "key_filename": self.sshConfig["keyFilePath"], }, ) client.sudo('rm -rf {}/*'.format(self.dataLocation)) tarFile = str((PurePosixPath(self.dataLocation).parent/PurePosixPath(self.dataLocation).name).with_suffix(".tar.gz")) client.put(location+'/test.tfrecord', self.dataLocation+'/test.tfrecord') client.put(location+'/train.tfrecord', self.dataLocation+'/train.tfrecord') client.put(location+'/pipeline.config', self.dataLocation+'/pipeline.config') client.put(location+'/label_map.pbtxt', 
self.dataLocation+'/label_map.pbtxt') client.put(location+'/model.config', self.dataLocation+'/model.config') if self.jupyterConfig != "": client.run("touch {}".format(self.dataLocation+'/log.txt')) except Exception as e: raise ValueError("Error in copying data to cloud server. " + str(e)) def __myexec(self, ssh, cmd, timeout, want_exitcode=False): # one channel per command stdin, stdout, stderr = ssh.exec_command(cmd) # get the shared channel for stdout/stderr/stdin channel = stdout.channel # we do not need stdin. stdin.close() # indicate that we're not going to write to that channel anymore channel.shutdown_write() # read stdout/stderr in order to prevent read block hangs stdout_chunks = [] stdout_chunks.append(stdout.channel.recv(len(stdout.channel.in_buffer))) # chunked read to prevent stalls while not channel.closed or channel.recv_ready() or channel.recv_stderr_ready(): # stop if channel was closed prematurely, and there is no data in the buffers. got_chunk = False readq, _, _ = select.select([stdout.channel], [], [], timeout) for c in readq: if c.recv_ready(): stdout_chunks.append(stdout.channel.recv(len(c.in_buffer))) got_chunk = True if c.recv_stderr_ready(): # make sure to read stderr to prevent stall stderr.channel.recv_stderr(len(c.in_stderr_buffer)) got_chunk = True ''' 1) make sure that there are at least 2 cycles with no data in the input buffers in order to not exit too early (i.e. cat on a >200k file). 2) if no data arrived in the last loop, check if we already received the exit code 3) check if input buffers are empty 4) exit the loop ''' if not got_chunk \ and stdout.channel.exit_status_ready() \ and not stderr.channel.recv_stderr_ready() \ and not stdout.channel.recv_ready(): # indicate that we're not going to read from this channel anymore stdout.channel.shutdown_read() # close the channel stdout.channel.close() break # exit as remote side is finished and our bufferes are empty # close all the pseudofiles stdout.close() stderr.close() if want_exitcode: # exit code is always ready at this point return (''.join(stdout_chunks), stdout.channel.recv_exit_status()) return ''.join(stdout_chunks) def __myexec1(self, ssh, cmd, timeout, want_exitcode=False): # one channel per command stdin, stdout, stderr = ssh.exec_command(cmd, get_pty=True) for line in iter(stderr.readline, ""): print(line, end="") stdin.close() stdout.close() stderr.close() def executeCode(self): try: client = Connection( host=self.serverIP, user=self.sshConfig["userName"], connect_kwargs={ "key_filename": self.sshConfig["keyFilePath"], }, ) cmd = 'python3.8 {} {} {}'.format(self.codeLocation, self.dataLocation, self.pretrainedModelLocation) client.run( cmd) except Exception as e: raise ValueError("Error in running code on cloud server. 
" + str(e)) def start_executing_notebook(self): try: publicIp_Port = self.serverIP + ":" + self.jupyterConfig["portNo"] conURL = "ws://" + publicIp_Port base = 'http://' + publicIp_Port + '' headers = self.jupyterConfig["header"] url = base + '/api/kernels' flag = True while flag: # deadlock need to add timeout response = requests.post(url, headers=headers) flag = False kernel = json.loads(response.text) # Load the notebook and get the code of each cell url = base + '/api/contents/' + self.jupyterConfig["notebook_path"] response = requests.get(url, headers=headers) file = json.loads(response.text) code = [c['source'] for c in file['content']['cells'] if len(c['source']) > 0 and c['cell_type']=='code' ] ws = create_connection(conURL + "/api/kernels/" + kernel["id"] + "/channels", header=headers) def send_execute_request(code): msg_type = 'execute_request'; content = {'code': code, 'silent': False} hdr = {'msg_id': uuid.uuid1().hex, 'username': 'test', 'session': uuid.uuid1().hex, 'data': datetime.datetime.now().isoformat(), 'msg_type': msg_type, 'version': '5.0'} msg = {'header': hdr, 'parent_header': hdr, 'metadata': {}, 'content': content} return msg for c in code: ws.send(json.dumps(send_execute_request(c))) # We ignore all the other messages, we just get the code execution output # (this needs to be improved for production to take into account errors, large cell output, images, etc.) error_msg = '' traceback_msg = '' for i in range(0, len(code)): msg_type = ''; while msg_type != "stream": rsp = json.loads(ws.recv()) msg_type = rsp["msg_type"] if msg_type == 'error': raise ValueError("Error on Cloud machine: "+rsp['content']['evalue']) ws.close() self.log.info('Status:- |...Execution Started`') except ClientError as e: raise ValueError(e) def __wait_for_completion(self, sftp, remoteLogFile, localLogFile): waiting = True error_msg = "" while waiting: time.sleep(5 * 60) try: sftp.get(str(remoteLogFile), str(localLogFile)) with open(localLogFile, "r") as f: content = f.readlines() for x in content: if "Error" in x: waiting = False error_msg = x if "success" in x: waiting = False except: raise (str(e)) return error_msg def copy_file_from_server(self, localPath): try: client = Connection( host=self.serverIP, user=self.sshConfig["userName"], connect_kwargs={ "key_filename": self.sshConfig["keyFilePath"], }, ) remoteLogFile = PurePosixPath(self.dataLocation)/'log.txt' localLogFile = Path(localPath)/'remote_log.txt' client.get(str(remoteLogFile), str(localLogFile)) tarFile = (PurePosixPath(self.dataLocation).parent/PurePosixPath(self.dataLocation).name).with_suffix(".tar.gz") client.get(str(tarFile), str(Path(localPath)/tarFile.name)) except: raise return str(Path(localPath)/tarFile.name) def create_instance(self): instances = self.ec2_client.run_instances( ImageId=self.serverConfig["amiId"], MinCount=1, MaxCount=1, InstanceType="t2.xlarge", KeyName="AION_GPU", SecurityGroupIds = ["sg-02c3a6c8dd67edb74"] ) self.instanceId = [instances['Instances'][0]['InstanceId']] def start_instance(self): if self.separate_instance: self.create_instance() try: response = self.ec2_client.start_instances(InstanceIds=self.instanceId, DryRun=True) except Exception as e: if 'DryRunOperation' not in str(e): raise ValueError("Error in starting the EC2 instance, check server configuration. 
" + str(e)) try: running_state_code = 16 response = self.ec2_client.start_instances(InstanceIds=self.instanceId, DryRun=False) instance_status_code = 0 while instance_status_code != running_state_code: response = self.ec2_client.describe_instances(InstanceIds=self.instanceId) instance_status_code = response['Reservations'][0]['Instances'][0]['State']['Code'] if instance_status_code == running_state_code: self.serverIP = response['Reservations'][0]['Instances'][0]['PublicIpAddress'] break except ClientError as e: raise ValueError("Error in starting the EC2 instance. " + str(e)) def terminate_instance(self): ec2 = boto3.resource(self.serverConfig["serverName"], region_name=self.serverConfig["regionName"], aws_access_key_id=self.serverConfig["awsAccessKeyId"], aws_secret_access_key=self.serverConfig["awsSecretAccessKey"]) ec2.instances.filter(InstanceIds=self.instanceId).terminate() # for terminating an ec2 instance def stop_server_instance(self): try: self.ec2_client.stop_instances(InstanceIds=self.instanceId, DryRun=True) except Exception as e: if 'DryRunOperation' not in str(e): raise stopped_state_code = 80 # Dry run succeeded, call stop_instances without dryrun try: response = self.ec2_client.stop_instances(InstanceIds=self.instanceId, DryRun=False) response = self.ec2_client.describe_instances(InstanceIds=self.instanceId) instance_status_code = 0 while instance_status_code != stopped_state_code: response = self.ec2_client.describe_instances(InstanceIds=self.instanceId) instance_status_code = response['Reservations'][0]['Instances'][0]['State']['Code'] if instance_status_code == stopped_state_code: break except: raise ValueError("Error in stopping the EC2 instance {}.Please stop it manually ".format(self.instanceId[0])) if self.separate_instance: try: self.terminate_instance() except: raise ValueError("Error in terminating the EC2 instance {}.Please terminate it manually ".format(self.instanceId[0]))
classificationModel.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import time import os import sys import numpy as np from numpy import arange from numpy import argmax import json from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.metrics import recall_score from sklearn.metrics import precision_score from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.linear_model import SGDClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import roc_curve from sklearn.metrics import roc_auc_score from sklearn.metrics import f1_score from sklearn.svm import SVC from xgboost import XGBClassifier from lightgbm import LGBMClassifier from catboost import CatBoostClassifier from sklearn.preprocessing import binarize from learner.optimizetechnique import OptimizationTq from learner.defaultAlgos import defaultParams from learner.parameters import parametersDefine from hyperopt import fmin, tpe, hp, STATUS_OK, Trials import logging from learner.aion_matrix import aion_matrix import mlflow from pathlib import Path from uncertainties.aionUQ import aionUQ # apply threshold to positive probabilities to create labels def to_labels(pos_probs, threshold): return (pos_probs >= threshold).astype('int') class ClassifierModel(): def __init__(self,modelList,params,scoreParam,cvSplit,numIter,geneticParam,trainX,trainY,testX,testY,method,modelType,MakeFP0,MakeFN0,deployLocation): self.modelList =modelList self.params =params self.trainX =trainX self.X =trainX self.trainY =trainY self.testX = testX self.testY = testY self.method =method self.scoreParam=scoreParam self.cvSplit=cvSplit self.numIter=numIter self.geneticParam=geneticParam self.MakeFP0= MakeFP0 self.MakeFN0=MakeFN0 self.log = logging.getLogger('eion') self.modelType = modelType self.uq_x_train = trainX self.uq_x_test = testX self.uq_y_train = trainY self.uq_y_test = testY self.deployLocation = deployLocation self.AlgorithmNames={'Logistic Regression':'LogisticRegression','Stochastic Gradient Descent':'SGDClassifier','Naive Bayes':'GaussianNB','Support Vector Machine':'SVC','K Nearest Neighbors':'KNeighborsClassifier','Decision Tree':'DecisionTreeClassifier','Random Forest':'RandomForestClassifier','Gradient Boosting':'GradientBoostingClassifier','Extreme Gradient Boosting (XGBoost)':'XGBClassifier','Categorical Boosting (CatBoost)': 'CatBoostClassifier','Light Gradient Boosting (LightGBM)': 'LGBMClassifier','Bagging (Ensemble)':'BaggingClassifier','Stacking (Ensemble)':'StackingClassifier','Voting (Ensemble)':'VotingClassifier','Deep Q Network':'DQN','Dueling Deep Q Network':'DDQN','Neural Architecture Search':'NAS'} self.modelToAlgoNames = {value: key for key, value in self.AlgorithmNames.items()} def check_threshold(self,estimator,testX,testY,threshold_range,checkParameter,modelName): thresholdx = -1 for threshold 
in threshold_range: predictedData = estimator.predict_proba(testX) predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold) p_score = precision_score(testY, predictedData) #self.log.info('-------------> Precision:'+str(p_score)) r_score = recall_score(testY, predictedData) #self.log.info('-------------> Rscore:'+str(r_score)) #self.log.info(confusion_matrix(testY, predictedData)) tn, fp, fn, tp = confusion_matrix(testY, predictedData).ravel() if(checkParameter.lower() == 'fp'): if fp == 0: if(p_score == 1): thresholdx = threshold self.log.info('---------------> Best Threshold:'+str(threshold)) self.log.info('---------------> Best Precision:'+str(p_score)) self.log.info('---------------> Best Recall:'+str(r_score)) self.log.info('---------------> TN:'+str(tn)) self.log.info('---------------> FP:'+str(fp)) self.log.info('---------------> FN:'+str(fn)) self.log.info('---------------> TP:'+str(tp)) break if(checkParameter.lower() == 'fn'): if fn == 0: if(r_score == 1): thresholdx = threshold self.log.info('---------------> Best Threshold:'+str(threshold)) self.log.info('---------------> Best Precision:'+str(p_score)) self.log.info('---------------> Best Recall:'+str(r_score)) self.log.info('---------------> TN:'+str(tn)) self.log.info('---------------> FP:'+str(fp)) self.log.info('---------------> FN:'+str(fn)) self.log.info('---------------> TP:'+str(tp)) break return(thresholdx,p_score,r_score) def getBestModel(self,fp0,fn0,threshold,bestthreshold,rscore,brscore,pscore,bpscore,tscore,btscore): cmodel = False if(threshold != -1): if(bestthreshold == -1): cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif fp0: if rscore > brscore: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif rscore == brscore: if tscore > btscore or btscore == -0xFFFF: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif fn0: if pscore > bpscore: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif pscore == bpscore: if tscore > btscore or btscore == -0xFFFF: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore else: if tscore > btscore or btscore == -0xFFFF: cmodel = True btscore = tscore else: if(bestthreshold == -1): if tscore > btscore or btscore == -0xFFFF: cmodel = True btscore = tscore return cmodel,btscore,bestthreshold,brscore,bpscore def logMlflow(self, runName, params, metrices, estimator, algoName=None): with mlflow.start_run(run_name = runName): for k,v in params.items(): mlflow.log_param(k, v) for k,v in metrices.items(): mlflow.log_metric(k, v) if algoName == 'CatBoostClassifier': mlflow.catboost.log_model(estimator, "model") else: mlflow.sklearn.log_model(estimator, "model") model_uri = mlflow.get_artifact_uri("model") """ for some dataset evaluate takes more than 90 min, so commenting till some solution is not found evaluate_data = self.testX.copy() evaluate_data['label'] = self.testY.copy() mlflow.evaluate(model_uri, data=evaluate_data, targets='label', model_type="classifier") del evaluate_data """ def classModelling(self, modelOrFeatureBased,code_configure): paramObj=parametersDefine() bestModel='None' bestParams={} bestScore=-0xFFFF bestEstimator = 'None' bestpipelineModel='None' scoredetails = '' threshold = -1 bestthreshold = -1 precisionscore =-1 bestprecisionscore=-1 recallscore = -1 bestrecallscore=-1 self.log.info('\n---------- ClassifierModel has started 
----------') objClf = aion_matrix() try: self.log.info('Status:- |... Search Optimization Method applied: '+self.method) for modelName in self.modelList: if modelName in ['Bagging (Ensemble)','Voting (Ensemble)','Stacking (Ensemble)','Dueling Deep Q Network','Deep Q Network','Neural Architecture Search']: if modelName == 'Bagging (Ensemble)': from ensemble.ensemble_bagging import ensemble_bagging ensemble_bagging_obj = ensemble_bagging(self.params[modelName],self.scoreParam,self.MakeFP0,self.MakeFN0) estimator,modelParams,score,model,threshold,precisionscore,recallscore = ensemble_bagging_obj.ensemble_bagging_classifier(self.trainX,self.trainY,self.testX,self.testY) if modelName == 'Stacking (Ensemble)': from ensemble.ensemble_stacking import ensemble_stacking ensemble_stacking_obj = ensemble_stacking(self.params[modelName],self.scoreParam) estimator,modelParams,score,model,threshold,precisionscore,recallscore = ensemble_stacking_obj.ensemble_stacking_classifier(self.trainX,self.trainY,self.testX,self.testY,self.MakeFP0,self.MakeFN0,self.modelList) if modelName == 'Voting (Ensemble)': from ensemble.ensemble_voting import ensemble_voting ensemble_voting_obj = ensemble_voting("",self.scoreParam) #bug 12437 status,estimator,modelParams,score,model,threshold,precisionscore,recallscore = ensemble_voting_obj.ensemble_voting_classifier(self.trainX,self.trainY,self.testX,self.testY,self.MakeFP0,self.MakeFN0,self.modelList) if status != "SUCCESS": #bug 12437 continue if modelName == 'Deep Q Network': from reinforcement.DRL_train import ReinformentLearning rlObj = ReinformentLearning(self.params[modelName],self.scoreParam,'Classification') estimator,modelParams,score,model,threshold,precisionscore,recallscore = rlObj.TrainRL(self.trainX,self.trainY,self.testX,self.testY,'DQN',self.deployLocation) if modelName == 'Dueling Deep Q Network': from reinforcement.DRL_train import ReinformentLearning rlObj = ReinformentLearning(self.params[modelName],self.scoreParam,'Classification') estimator,modelParams,score,model,threshold,precisionscore,recallscore = rlObj.TrainRL(self.trainX,self.trainY,self.testX,self.testY,'DDQN',self.deployLocation) ''' if modelName == 'Neural Architecture Search': from nas.aionNAS import aionNAS objNAS = aionNAS('Classification',self.params[modelName],self.trainX,self.testX,self.trainY,self.testY,self.deployLocation) estimator,modelParams,score,model,threshold,precisionscore,recallscore=objNAS.nasMain(self.scoreParam) ''' if(scoredetails != ''): scoredetails += ',' scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(score)+',"ModelUncertainty":"NA"}' status,bscore,bthres,brscore,bpscore = self.getBestModel(self.MakeFP0,self.MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,score,bestScore) if status: bestScore =bscore bestModel =model bestParams=modelParams bestEstimator=estimator bestthreshold = bthres bestrecallscore = brscore bestprecisionscore = bpscore self.log.info('Status:- |... ML Algorithm applied: '+modelName) self.log.info('Status:- |... 
Score: '+objClf.get_print_score(self.scoreParam)+'='+str(round(score,2))+'\n') continue paramSpace=self.params[modelName].copy() algoName = self.AlgorithmNames[modelName] paramDict =paramObj.paramDefine(paramSpace,self.method) if not self.method == 'bayesopt': paramSize = paramObj.getParamSpaceSize(paramDict) else: paramSize = 0 if (self.method == 'bayesopt' and not paramDict) or (not self.method == 'bayesopt' and paramSize<=0): try: start = time.time() #function call defObj = defaultParams(algoName,paramDict,self.scoreParam,self.MakeFP0, self.MakeFN0,paramSize) estimator, modelParams, model,score, threshold, precisionscore, recallscore =defObj.startTrainingClassification(self.trainX,self.trainY,self.testX,self.testY) executionTime = time.time() - start if (scoredetails != ''): scoredetails += ',' scoredetails += '{"Model":"' + self.modelToAlgoNames[model] + '","FeatureEngineering":"' + str( modelOrFeatureBased) + '","Score":' + str(score) + ',"ModelUncertainty":"NA"}' status, bscore, bthres, brscore, bpscore = self.getBestModel(self.MakeFP0, self.MakeFN0,threshold, bestthreshold,recallscore, bestrecallscore,precisionscore, bestprecisionscore,score, bestScore) self.log.info('---------> Total Execution: ' + str(executionTime) + '\n') if status: bestScore = bscore bestModel = model bestParams = modelParams bestEstimator = estimator bestthreshold = bthres bestrecallscore = brscore bestprecisionscore = bpscore self.log.info('Status:- |... ML Algorithm applied: ' + modelName) self.log.info('Status:- |... Score: ' + objClf.get_print_score(self.scoreParam) + '=' + str( round(score, 2)) + '\n') except Exception as inst: self.log.info('\n < ---------- Model Execution Failed Start--------->') self.log.info('\n<-------' + modelName + ' Model Execution failed!!!.' + str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) self.log.info('\n < ---------- Model Execution Failed End --------->') continue # call algorithms with default valuepass if self.method == 'bayesopt': code_configure.add_model(algoName,paramSpace) else: paramDictCopy = paramDict # numpy array is not json serializable #numpy is already imported but still np.ndarray raise error import numpy as np for key,value in paramDictCopy.items(): if isinstance(value, np.ndarray): paramDictCopy[key] = paramDictCopy[key].tolist() code_configure.add_model(algoName,paramDictCopy) trainingStatus = 'Success' if self.method =='grid': try: self.log.info("-------> Optimization Method :Grid Search") self.log.info("-------> Model Name: "+str(modelName)) opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY) start = time.time() model,modelParams,score,estimator=opTq.gridSearchOpt() executionTime=time.time() - start if not self.testX.empty: predictedData = estimator.predict(self.testX) score = objClf.get_score(self.scoreParam,self.testY,predictedData) else: score = score*100 problemName = estimator.__class__.__name__ runName = algoName + '_' + modelOrFeatureBased metrices = {} metrices["score"] = score try: self.logMlflow(runName, modelParams, metrices, estimator, algoName) except Exception as e: self.log.info('----------> ML Flow error!!!. 
' + str(e)) # usnish pass output_jsonobject = "" problemName = estimator.__class__.__name__ self.log.info('----------> Testing Score: '+str(score)) try: if ((estimator.__class__.__name__ == "ABCMeta") or (model in ['SGDClassifier','XGBClassifier','CatBoostClassifier','LGBMClassifier']) ): self.log.info('-----> Model Uncertainty Not Supported') else: uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation) accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertainty_per=uqObj.uqMain_BBMClassification(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,"aionuq") self.log.info("-------> model_confidence: "+str(model_confidence_per)+str('%')) self.log.info("-------> model_uncertainty: "+str(model_uncertainty_per)+str('%')) except: pass if(scoredetails != ''): scoredetails += ',' scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(score)+',"ModelUncertainty":'+str(json.dumps(output_jsonobject))+'}' self.log.info('----------> Testing Score: '+str(score)) import numpy as np if self.MakeFP0: self.log.info('-------- Calculate Threshold for FP Start-------') startRange = 0.0 endRange = 1.0 stepsize = 0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = self.check_threshold(estimator,self.trainX,self.trainY,threshold_range,'FP',algoName) self.log.info('-------- Calculate Threshold for FP End-------') if self.MakeFN0: self.log.info('-------- Calculate Threshold for FN Start-------') startRange = 1.0 endRange = 0.0 stepsize = -0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = self.check_threshold(estimator,self.trainX,self.trainY,threshold_range,'FN',algoName) self.log.info('-------- Calculate Threshold for FN End-------') self.log.info('----------> Total Execution: '+str(executionTime)+'\n') status,bscore,bthres,brscore,bpscore = self.getBestModel(self.MakeFP0,self.MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,score,bestScore) if status: bestScore =bscore bestModel =model bestParams=modelParams bestEstimator=estimator bestthreshold = bthres bestrecallscore = brscore bestprecisionscore = bpscore except Exception as inst: self.log.info('\n < ---------- Model Execution Failed Start--------->') self.log.info('\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst)) self.log.info('\n < ---------- Model Execution Failed End --------->') trainingStatus = 'Error (Exception)' elif self.method == 'random': try: self.log.info("-------> Optimization Method :Random Search") self.log.info("-------> Model Name: "+str(modelName)) start = time.time() opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY) model,modelParams,score,estimator=opTq.randomSearchOpt() executionTime=time.time() - start if not self.testX.empty: predictedData = estimator.predict(self.testX) score = objClf.get_score(self.scoreParam,self.testY,predictedData) else: score = score*100 problemName = estimator.__class__.__name__ runName = algoName + '_' + modelOrFeatureBased metrices = {} metrices["score"] = score try: self.logMlflow(runName, modelParams, metrices, estimator, algoName) except Exception as e: self.log.info('----------> ML Flow error!!!. 
' + str(e)) # usnish pass import numpy as np if self.MakeFP0: self.log.info('-------- Calculate Threshold for FP Start-------') startRange = 0.0 endRange = 1.0 stepsize = 0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = self.check_threshold(estimator,self.trainX,self.trainY,threshold_range,'FP',algoName) self.log.info('-------- Calculate Threshold for FP End-------') if self.MakeFN0: self.log.info('-------- Calculate Threshold for FN Start-------') startRange = 1.0 endRange = 0.0 stepsize = -0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = self.check_threshold(estimator,self.trainX,self.trainY,threshold_range,'FN',algoName) self.log.info('-------- Calculate Threshold for FN End-------') if threshold != -1: if not self.testX.empty: predictedData = estimator.predict_proba(self.testX) predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold) score = objClf.get_score(self.scoreParam,self.testY,predictedData) else: predictedData = estimator.predict_proba(self.trainX) predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold) score = objClf.get_score(self.scoreParam,self.trainY,predictedData) self.log.info('---------> Total Execution: '+str(executionTime)+'\n') output_jsonobject = "" problemName = estimator.__class__.__name__ self.log.info('----------> Testing Score: '+str(score)) try: if ((estimator.__class__.__name__ == "ABCMeta") or (model in ['SGDClassifier','XGBClassifier','CatBoostClassifier','LGBMClassifier']) ): self.log.info('-----> Model Uncertainty Not Supported') else: uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation) accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertainty_per=uqObj.uqMain_BBMClassification(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,"aionuq") self.log.info("-------> model_confidence: "+str(model_confidence_per)+str('%')) self.log.info("-------> model_uncertainty: "+str(model_uncertainty_per)+str('%')) except Exception as e: pass if(scoredetails != ''): scoredetails += ',' scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(score)+',"ModelUncertainty":'+str(json.dumps(output_jsonobject))+'}' status,bscore,bthres,brscore,bpscore = self.getBestModel(self.MakeFP0,self.MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,score,bestScore) if status: bestScore =bscore bestModel =model bestParams=modelParams bestEstimator=estimator bestthreshold = threshold bestrecallscore = recallscore bestprecisionscore = precisionscore except Exception as inst: self.log.info('\n < ---------- Model Execution Failed Start--------->') self.log.info('\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) self.log.info('\n < ---------- Model Execution Failed End --------->') trainingStatus = 'Error (Exception)' elif self.method == 'bayesopt': try: self.log.info("-------> Optimization Method :BayesOpt") self.log.info("-------> Model Name: "+str(modelName)) opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY) fun=opTq.f trials = Trials() start = time.time() best = fmin(fun,paramDict,algo=tpe.suggest, max_evals=self.numIter, 
trials=trials) executionTime=time.time() - start results = sorted(trials.results, key = lambda x: x['loss']) bestresult=results[0] model=bestresult['model'] score=bestresult['score'] modelParams=bestresult['params'] executionTime=time.time() - start res = ', '.join("{!s}={!r}".format(key,val) for (key,val) in modelParams.items()) modelObj=eval(model+'('+res+')') estimator = modelObj.fit(self.trainX,self.trainY) if not self.testX.empty: predictedData = estimator.predict(self.testX) score = objClf.get_score(self.scoreParam,self.testY,predictedData) problemName = estimator.__class__.__name__ runName = algoName + '_' + modelOrFeatureBased metrices = {} metrices["score"] = score try: self.logMlflow(runName, modelParams, metrices, estimator, algoName) except Exception as e: self.log.info('----------> ML Flow error!!!. ' + str(e)) # usnish pass output_jsonobject = "" problemName = estimator.__class__.__name__ self.log.info('----------> Testing Score: '+str(score)) try: if ((estimator.__class__.__name__ == "ABCMeta") or (model in ['SGDClassifier','XGBClassifier','CatBoostClassifier','LGBMClassifier']) ): self.log.info('-----> Model Uncertainty Not Supported') else: uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation) accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertainty_per=uqObj.uqMain_BBMClassification(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,"aionuq") self.log.info("-------> model_confidence: "+str(model_confidence_per)+str('%')) self.log.info("-------> model_uncertainty: "+str(model_uncertainty_per)+str('%')) except: pass if(scoredetails != ''): scoredetails += ',' scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(score)+',"ModelUncertainty":'+str(json.dumps(output_jsonobject))+'}' ''' test_accuracy = accuracy_score(self.testY,predictedData) test_precision = precision_score(self.testY,predictedData,average='macro') self.log.info('---------> Test Accuracy: '+str(test_accuracy)) self.log.info('---------> Test Precision: '+str(test_precision)) ''' import numpy as np if self.MakeFP0: self.log.info('-------- Calculate Threshold for FP Start-------') startRange = 0.0 endRange = 1.0 stepsize = 0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = self.check_threshold(estimator,self.testX,self.testY,threshold_range,'FP',algoName) self.log.info('-------- Calculate Threshold for FP End-------') if self.MakeFN0: self.log.info('-------- Calculate Threshold for FN Start-------') startRange = 1.0 endRange = 0.0 stepsize = -0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = self.check_threshold(estimator,self.testX,self.testY,threshold_range,'FN',algoName) self.log.info('-------- Calculate Threshold for FN End-------') self.log.info('---------> Total Execution: '+str(executionTime)+'\n') status,bscore,bthres,brscore,bpscore = self.getBestModel(self.MakeFP0,self.MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,score,bestScore) if status: bestScore =score bestModel =model bestParams=modelParams res = ', '.join("{!s}={!r}".format(key,val) for (key,val) in bestParams.items()) modelObj=eval(bestModel+'('+res+')') bestEstimator=estimator bestthreshold = threshold bestrecallscore = recallscore bestprecisionscore = precisionscore except Exception as inst: self.log.info('\n < ---------- Model Execution Failed 
Start--------->') self.log.info('\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst)) self.log.info('\n < ---------- Model Execution Failed End --------->') trainingStatus = 'Error (Exception)' else: trainingStatus = 'Error (HyperTunning Algo Not Supported)' pass self.log.info('Status:- |... ML Algorithm applied: '+modelName) if trainingStatus.lower() == 'success': self.log.info('Status:- |... Score after hyperparameter tuning: '+objClf.get_print_score(self.scoreParam)+'='+str(round(score,2))+'\n') else: self.log.info('Status:- |... Training Error : '+trainingStatus+'\n') self.log.info('---------- ClassifierModel End ---------- \n') if bestModel != 'None': self.log.info('\n------- Best Model and its parameters -------------') self.log.info('Status:- |... Best Algorithm selected: '+str(self.modelToAlgoNames[bestModel])+' Score='+str(round(bestScore,2))) self.log.info("-------> Best Name: "+str(bestModel)) self.log.info("-------> Best Score: "+str(bestScore)) return self.modelToAlgoNames[bestModel],bestParams,bestScore,bestEstimator,scoredetails,bestthreshold,bestprecisionscore,bestrecallscore else: raise Exception("Sorry, no model is trained") except Exception as inst: self.log.info( '\n-----> ClassifierModel failed!!!.'+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
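check_threshold implements the FP-0/FN-0 policy by sweeping probability cut-offs and accepting the first one whose confusion matrix satisfies the constraint. The sketch below reproduces that idea for the false-positive case; the model and synthetic dataset are stand-ins, not the pipeline's own data.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, precision_score, recall_score

X, y = make_classification(n_samples=400, weights=[0.7, 0.3], random_state=1)
clf = LogisticRegression(max_iter=1000).fit(X, y)
probs = clf.predict_proba(X)[:, 1]

best_threshold = -1
for threshold in np.arange(0.0, 1.0, 0.01):
    preds = (probs >= threshold).astype(int)
    tn, fp, fn, tp = confusion_matrix(y, preds).ravel()
    # accept the first cut-off with zero false positives and perfect precision
    if fp == 0 and precision_score(y, preds, zero_division=0) == 1.0:
        best_threshold = threshold
        print('threshold', round(threshold, 2),
              'recall', round(recall_score(y, preds), 3))
        break
print('best threshold for FP=0:', best_threshold)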
aion_matrix.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import warnings warnings.filterwarnings('ignore') import pandas as pd from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from sklearn.metrics import roc_curve, auc from sklearn.metrics import roc_auc_score from sklearn.metrics import accuracy_score from sklearn.metrics import r2_score from sklearn.metrics import mean_absolute_error,make_scorer from sklearn.metrics import mean_squared_error from sklearn.metrics import recall_score from sklearn.metrics import precision_score from sklearn.metrics import f1_score import logging import numpy as np from sklearn.preprocessing import binarize from sklearn.preprocessing import LabelBinarizer from sklearn.metrics import r2_score from sklearn.metrics import mean_absolute_error,make_scorer from sklearn.metrics import mean_squared_error class aion_matrix: def __init__(self): self.log = logging.getLogger('eion') def get_print_score(self,matrix): if 'accuracy' in str(matrix).lower(): return 'Accuracy' elif 'recall' in str(matrix).lower(): return 'Recall' elif 'precision' in str(matrix).lower(): return 'Precision' elif 'f1_score' in str(matrix).lower(): return 'F1_Score' elif 'roc_auc' in str(matrix).lower(): return 'ROC_AUC' elif 'mse' in str(matrix).lower() or 'neg_mean_squared_error' in str(matrix).lower(): return 'Mean Squared Error(MSE)' elif 'rmse' in str(matrix).lower() or 'neg_root_mean_squared_error' in str(matrix).lower(): return 'Root Mean Suared Error(RMSE)' elif 'mae' in str(matrix).lower() or 'neg_mean_absolute_error' in str(matrix).lower(): return 'Mean Absolute Error (MAE)' elif 'r2' in str(matrix).lower(): return 'R-Squared(R2)' else: return 'Unknown' def get_score(self,matrix,actual,predict): if 'accuracy' in str(matrix).lower(): ensemble_score = accuracy_score(actual,predict) ensemble_score = ensemble_score*100 elif 'recall' in str(matrix).lower(): ensemble_score = recall_score(actual,predict,average='macro') ensemble_score = ensemble_score*100 elif 'precision' in str(matrix).lower(): ensemble_score = precision_score(actual,predict,average='macro') ensemble_score = ensemble_score*100 elif 'f1_score' in str(matrix).lower(): ensemble_score = f1_score(actual,predict, average='macro') ensemble_score = ensemble_score*100 elif 'roc_auc' in str(matrix).lower(): try: ensemble_score = roc_auc_score(actual,predict,average="macro") except: try: actual = pd.get_dummies(actual) predict = pd.get_dummies(predict) ensemble_score = roc_auc_score(actual,predict, average='weighted', multi_class='ovr') except: ensemble_score = 0 ensemble_score = ensemble_score*100 elif ('mse' in str(matrix).lower()) or ('neg_mean_squared_error' in str(matrix).lower()): ensemble_score = mean_squared_error(actual,predict) elif ('rmse' in str(matrix).lower()) or ('neg_root_mean_squared_error' in str(matrix).lower()): ensemble_score=mean_squared_error(actual,predict,squared=False) elif ('mae' in str(matrix).lower()) or ('neg_mean_absolute_error' in str(matrix).lower()): 
ensemble_score=mean_absolute_error(actual,predict) elif 'r2' in str(matrix).lower(): ensemble_score=r2_score(actual,predict) return round(ensemble_score,2) def getClassificationPerformaceMatrix(self,le_trainY,predictedData,labelMaps): setOfyTrue = set(le_trainY) unqClassLst = list(setOfyTrue) if(str(labelMaps) != '{}'): inv_mapping_dict = {v: k for k, v in labelMaps.items()} unqClassLst2 = (pd.Series(unqClassLst)).map(inv_mapping_dict) unqClassLst2 = list(unqClassLst2) else: unqClassLst2 = unqClassLst indexName = [] columnName = [] targetnames=[] for item in unqClassLst2: indexName.append("act:"+str(item)) columnName.append("pre:"+str(item)) targetnames.append(str(item)) matrixconfusion = pd.DataFrame(confusion_matrix(le_trainY,predictedData, labels = unqClassLst),index = indexName, columns = columnName) #pd.set_option('expand_frame_repr', False) pd.set_option('display.max_columns',len(targetnames)+2) self.log.info('-------> Confusion Matrix: ') self.log.info(matrixconfusion) pd.reset_option('display.max_columns') #pd.reset_option('expand_frame_repr') #self.log.info('-------> Confusion Matrix With Labels: ') #self.log.info(confusion_matrix(le_trainY,predictedData, labels = unqClassLst)) #print(unqClassLst2) classificationreport = pd.DataFrame(classification_report(le_trainY, predictedData, labels = unqClassLst,target_names=targetnames,output_dict=True)).transpose() self.log.info('-------> Classification Report: ') self.log.info(classificationreport) lb = LabelBinarizer() lb.fit(le_trainY) transformTarget= lb.transform(le_trainY) transformPredict = lb.transform(predictedData) rocaucscore = roc_auc_score(transformTarget,transformPredict,average="macro") self.log.info('-------> ROC AUC SCORE :'+str(rocaucscore)) matrixconfusion = matrixconfusion.to_json(orient='index') classificationreport = classificationreport.to_json(orient='index') matrix = '"ConfusionMatrix":'+matrixconfusion+',"ClassificationReport":'+classificationreport+',"ROC_AUC_SCORE":'+str(rocaucscore) return(matrix) def get_regression_matrix(self,targetData,predictedData): r2score=r2_score(targetData, predictedData) self.log.info('-------> R2_score :'+str(r2score)) meanabsoluteerror=(mean_absolute_error(targetData, predictedData)) self.log.info('-------> MAE :'+str(meanabsoluteerror)) meanssquatederror=mean_squared_error(targetData, predictedData) self.log.info('-------> MSE :'+str(meanssquatederror)) rootmeanssquatederror=mean_squared_error(targetData, predictedData,squared=False) self.log.info('-------> RMSE :'+str(rootmeanssquatederror)) targetArray, predictedArray = np.array(targetData), np.array(predictedData) try: EPSILON = 1e-10 meanpercentageerror=np.mean(np.abs((targetArray - predictedArray) / (targetArray+EPSILON)))*100 except ZeroDivisionError: meanpercentageerror = 0 self.log.info('-------> MAPE :'+str(meanpercentageerror)) try: normalised_rmse_percentage = round(((rootmeanssquatederror/ ( np.max(targetData) - np.min(targetData) )) * 100), 4) except Exception as e: normalised_rmse_percentage = -1 self.log.info('-------> Normalised RMSE percentage :'+str(normalised_rmse_percentage)) matrix = '"MAE":'+str(meanabsoluteerror)+',"R2Score":'+str(r2score)+',"MSE":'+str(meanssquatederror)+',"MAPE":'+str(meanpercentageerror)+',"RMSE":'+str(rootmeanssquatederror)+',"Normalised RMSE(%)":'+str(normalised_rmse_percentage) return matrix def getbestfeatureModel(self,modelType,scoreParam,score1,score2,model1,model2,threshold1,pscore1,rscore1,threshold2,pscore2,rscore2): best_feature_model = 'Model1' self.log.info('\n ---------- 
Summary Start ------------') if modelType.lower() == "classification": if(threshold1 == -1 and threshold2 == -1): if score1> score2: self.log.info('-------> Best Features: Model1') self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model1' else: self.log.info('-------> Best Features: Model2') self.log.info('-------> Best Model: '+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model2' elif(threshold1 == -1): self.log.info('-------> Best Features: Model2') self.log.info('-------> Best Model: '+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model2' elif(threshold1 == -2): self.log.info('-------> Best Features: Model1') self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model1' else: if pscore1 == pscore2: if rscore1 > rscore2: self.log.info('-------> Best Features: Model1') self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model1' else: self.log.info('-------> Best Features: Model2') self.log.info('-------> Best Model: '+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model2' elif rscore1 == rscore2: if pscore1 > pscore2: self.log.info('-------> Best Features: Model1') self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model1' else: self.log.info('-------> Best Features: Model2') self.log.info('-------> Best Model: '+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model2' elif modelType.lower() == "regression": if scoreParam == "r2" or scoreParam == "explained_variance": if score1> score2 : self.log.info('-------> Best Features: Model1') self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model1' else: self.log.info('-------> Best Features: Model2') self.log.info('-------> Best Model: '+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model2' else: if score1< score2 : self.log.info('-------> Best Features: Model1') self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model1' else: self.log.info('-------> Best Features: Model2') self.log.info('-------> Best Model: '+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model2' self.log.info('---------- Summary End ------------\n') return(best_feature_model) def check_threshold(self,estimator,testX,testY,threshold_range,checkParameter,modelName): thresholdx = -1 for threshold in threshold_range: predictedData = 
estimator.predict_proba(testX) predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold)#bug 12437 p_score = precision_score(testY, predictedData) #self.log.info('-------------> Precision:'+str(p_score)) r_score = recall_score(testY, predictedData) #self.log.info('-------------> Rscore:'+str(r_score)) #self.log.info(confusion_matrix(testY, predictedData)) tn, fp, fn, tp = confusion_matrix(testY, predictedData).ravel() if(checkParameter.lower() == 'fp'): if fp == 0: if(p_score == 1): thresholdx = threshold self.log.info('---------------> Best Threshold:'+str(threshold)) self.log.info('---------------> Best Precision:'+str(p_score)) self.log.info('---------------> Best Recall:'+str(r_score)) self.log.info('---------------> TN:'+str(tn)) self.log.info('---------------> FP:'+str(fp)) self.log.info('---------------> FN:'+str(fn)) self.log.info('---------------> TP:'+str(tp)) break if(checkParameter.lower() == 'fn'): if fn == 0: if(r_score == 1): thresholdx = threshold self.log.info('---------------> Best Threshold:'+str(threshold)) self.log.info('---------------> Best Precision:'+str(p_score)) self.log.info('---------------> Best Recall:'+str(r_score)) self.log.info('---------------> TN:'+str(tn)) self.log.info('---------------> FP:'+str(fp)) self.log.info('---------------> FN:'+str(fn)) self.log.info('---------------> TP:'+str(tp)) break return(thresholdx,p_score,r_score) def getBestModel(self,fp0,fn0,threshold,bestthreshold,rscore,brscore,pscore,bpscore,tscore,btscore): cmodel = False if(threshold != -1): if(bestthreshold == -1): cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif fp0: if rscore > brscore: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif rscore == brscore: if tscore > btscore or btscore == -0xFFFF: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif fn0: if pscore > bpscore: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif pscore == bpscore: if tscore > btscore or btscore == -0xFFFF: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore else: if tscore > btscore or btscore == -0xFFFF: cmodel = True btscore = tscore else: if(bestthreshold == -1): if tscore > btscore or btscore == -0xFFFF: cmodel = True btscore = tscore return cmodel,btscore,bestthreshold,brscore,bpscore
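get_regression_matrix above reports MAE, MSE, RMSE, R2, an epsilon-guarded MAPE, and RMSE normalised by the target range. The compact sketch below shows those calculations on toy arrays; the numbers are illustrative only, and np.sqrt(mse) stands in for the squared=False form used above.

import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score

actual = np.array([3.0, 5.0, 2.5, 7.0, 4.5])
pred = np.array([2.8, 5.4, 2.9, 6.5, 4.0])

mae = mean_absolute_error(actual, pred)
mse = mean_squared_error(actual, pred)
rmse = np.sqrt(mse)
r2 = r2_score(actual, pred)
EPSILON = 1e-10                                   # guards against zero targets
mape = np.mean(np.abs((actual - pred) / (actual + EPSILON))) * 100
nrmse_pct = round(rmse / (actual.max() - actual.min()) * 100, 4)
print({'MAE': mae, 'MSE': mse, 'RMSE': rmse, 'R2': r2,
       'MAPE': mape, 'Normalised RMSE(%)': nrmse_pct})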
ImageLearning.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import tensorflow from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Dropout, Flatten from tensorflow.keras.layers import Conv2D, MaxPooling2D from tensorflow.keras.utils import to_categorical from tensorflow.keras.preprocessing import image import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from tensorflow.keras.layers import Input from tensorflow.keras.models import Model from tensorflow.keras.optimizers import Adam from tensorflow.keras.applications import VGG16 from tensorflow.keras.callbacks import EarlyStopping import logging from sklearn.preprocessing import LabelEncoder from statistics import mean import sys from learner.machinelearning import machinelearning from learner.aion_matrix import aion_matrix from profiler.imageAug import ImageAugmentation from pathlib import Path class ImageLearning: def __init__(self,dataFrame,input_directory,outputdir,modelname,hyperParam, AugEnabled,keepAugImages,operations,augConf): self.image_list = dataFrame self.input_directory = input_directory self.outputdir = outputdir self.modelname = modelname self.hyperParam = hyperParam self.labelMapping={} self.log = logging.getLogger('eion') self.AIONNumericDtypes=['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] self.AugEnabled = AugEnabled self.keepAugImages = keepAugImages self.operations = operations self.augConf = augConf def TrainCAST(self,predicted_data_file): datatype = self.image_list['Label'].dtypes if datatype not in self.AIONNumericDtypes: labelEncode= LabelEncoder() self.image_list['Label'] = self.image_list['Label'].apply(str) self.image_list['Label'] = labelEncode.fit_transform(self.image_list['Label']) self.labelMapping = dict(zip(labelEncode.classes_, labelEncode.transform(labelEncode.classes_))) self.log.info('\n-------> First Ten Rows of Input Data After Encoding: ') self.log.info(self.image_list.head(10)) self.log.info('Status:- |... 
Target Feature Encoding Done') if not os.path.exists(self.outputdir): os.makedirs(self.outputdir) train_df, test_df = train_test_split(self.image_list, random_state=42, test_size=self.hyperParam['test_split_ratio']) if self.AugEnabled: csv_file = "tempTrainDf.csv" train_df.to_csv(csv_file, index=False) ia = ImageAugmentation(self.input_directory, csv_file) csv_file = ia.augment("imageclassification", self.operations,None,self.augConf) train_df = pd.read_csv(csv_file) Path(csv_file).unlink() train_image = [] train_df.reset_index(drop=True, inplace=True) for i in range(train_df.shape[0]): #print(os.path.join(self.input_directory,str(self.image_list['File'][i]))) img = image.load_img(os.path.join(self.input_directory,str(train_df['File'][i])), target_size=(self.hyperParam['img_width'],self.hyperParam['img_height'],self.hyperParam['img_channel']), grayscale=False) img = image.img_to_array(img) img = img/255 train_image.append(img) test_image = [] test_df.reset_index(drop=True, inplace=True) for i in range(test_df.shape[0]): #print(os.path.join(self.input_directory,str(self.image_list['File'][i]))) img = image.load_img(os.path.join(self.input_directory,str(test_df['File'][i])), target_size=(self.hyperParam['img_width'],self.hyperParam['img_height'],self.hyperParam['img_channel']), grayscale=False) img = image.img_to_array(img) img = img/255 test_image.append(img) self.log.info('Status:- |... Image Loading Done') X_train = np.array(train_image) y_train = train_df['Label'] X_test = np.array(test_image) y_test = test_df['Label'] ytrain = y_train.values ytrain = to_categorical(ytrain) ytest = y_test.values ytest = to_categorical(ytest) #print(y) self.log.info("Loading Imagenet Weights...") if self.modelname == "densenet": self.log.info('Loading Densenet model') baseModel = tensorflow.keras.applications.DenseNet121(weights="imagenet", include_top=False, input_tensor=Input(shape=(self.hyperParam['img_width'], self.hyperParam['img_height'], self.hyperParam['img_channel']))) #98 elif self.modelname == "inception": self.log.info('Loading Inception model') baseModel = tensorflow.keras.applications.InceptionV3(weights="imagenet", include_top=False, input_tensor=Input(shape=(self.hyperParam['img_width'], self.hyperParam['img_height'], self.hyperParam['img_channel']))) #97 headModel = baseModel.output headModel = Flatten(name="flatten")(headModel) headModel = Dense(1024, activation='relu')(headModel) headModel = Dropout(0.5)(headModel) headModel = Dense(2, activation='sigmoid')(headModel) model = Model(inputs=baseModel.input, outputs=headModel) self.log.info("[INFO] compiling model...") opt = Adam(lr=self.hyperParam['lr']) model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"]) #early_stop = EarlyStopping(monitor='val_loss',patience=2) #history = model.fit(X_train, y_train, epochs=hyperparam_config['epochs'], validation_data=(X_test, y_test), callbacks=[early_stop]) history = model.fit(X_train, ytrain, epochs=self.hyperParam['epochs'], validation_data=(X_test, ytest)) self.log.info('Status:- |... 
Image Classification Algorithm applied:'+str(self.modelname)) #Saving trained model weights model.save_weights(os.path.join(self.outputdir, self.modelname)) saved_model = self.modelname modelname = self.modelname prediction = model.predict(X_train) predictedData = np.argmax(prediction,axis=1) mlobj = machinelearning() self.log.info('\n--------- Performance Matrix with Train Data ---------') trainingperformancematrix = mlobj.getClassificationPerformaceMatrix(y_train, predictedData,self.labelMapping) prediction = model.predict(X_test) predictedData = np.argmax(prediction,axis=1) self.log.info('\n--------- Performance Matrix with Test Data ---------') performancematrix = mlobj.getClassificationPerformaceMatrix(y_test, predictedData,self.labelMapping) df_test = pd.DataFrame() df_test['actual'] = y_test df_test['predict'] = predictedData df_test.to_csv(predicted_data_file) objClf = aion_matrix() scoring_param = 'Accuracy' score = objClf.get_score(scoring_param,y_test,predictedData) #score = mean(history.history['accuracy']) if self.AugEnabled and not self.keepAugImages: ia.removeAugmentedImages(train_df) scoredetails = '{"Model":"'+modelname+'","Score":'+str(round(score,2))+'}' self.log.info('Status:- |... Score Accuracy: '+str(round(score,2))) return saved_model,modelname,'ImageClassification',scoring_param,score,scoredetails,self.labelMapping,trainingperformancematrix,performancematrix
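TrainCAST builds its classifier by taking an ImageNet backbone with include_top=False and attaching a small dense head. The sketch below shows that construction in isolation; the image size, class count, and learning rate are placeholder assumptions, and passing weights="imagenet" downloads the pretrained weights on first use.

import tensorflow as tf
from tensorflow.keras.layers import Input, Flatten, Dense, Dropout
from tensorflow.keras.models import Model

img_width, img_height, img_channel, num_classes = 224, 224, 3, 2

# ImageNet backbone without its classification top
base = tf.keras.applications.DenseNet121(
    weights="imagenet", include_top=False,
    input_tensor=Input(shape=(img_width, img_height, img_channel)))
# small dense head on top of the convolutional features
head = Flatten(name="flatten")(base.output)
head = Dense(1024, activation="relu")(head)
head = Dropout(0.5)(head)
head = Dense(num_classes, activation="sigmoid")(head)
model = Model(inputs=base.input, outputs=head)
model.compile(loss="binary_crossentropy",
              optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
              metrics=["accuracy"])
model.summary()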
image_eda.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import warnings warnings.simplefilter("ignore") import os import numpy as np from numpy import asarray import cv2 import sys import random import glob as glob import math as m # for gamma function, called from scipy.special import gamma as tgamma import matplotlib.image as mpimg import skimage from libsvm import svmutil,svm #import svmutil from svmutil import * from svm import * from PIL import Image from collections import Counter from imutils import paths import matplotlib.pyplot as plt import json ################################################################################### #Input - # AGGD fit model, takes input as the MSCN Image / Pair-wise Product #Output - best values of image parameters #Defination - used as internal method to measure_ImageQualityScore ################################################################################### def AGGDfit(structdis): # variables to count positive pixels / negative pixels and their squared sum poscount = 0 negcount = 0 possqsum = 0 negsqsum = 0 abssum = 0 poscount = len(structdis[structdis > 0]) # number of positive pixels negcount = len(structdis[structdis < 0]) # number of negative pixels # calculate squared sum of positive pixels and negative pixels possqsum = np.sum(np.power(structdis[structdis > 0], 2)) negsqsum = np.sum(np.power(structdis[structdis < 0], 2)) # absolute squared sum abssum = np.sum(structdis[structdis > 0]) + np.sum(-1 * structdis[structdis < 0]) # calculate left sigma variance and right sigma variance lsigma_best = np.sqrt((negsqsum/negcount)) rsigma_best = np.sqrt((possqsum/poscount)) gammahat = lsigma_best/rsigma_best # total number of pixels - totalcount totalcount = structdis.shape[1] * structdis.shape[0] rhat = m.pow(abssum/totalcount, 2)/((negsqsum + possqsum)/totalcount) rhatnorm = rhat * (m.pow(gammahat, 3) + 1) * (gammahat + 1)/(m.pow(m.pow(gammahat, 2) + 1, 2)) prevgamma = 0 prevdiff = 1e10 sampling = 0.001 gam = 0.2 # vectorized function call for best fitting parameters vectfunc = np.vectorize(func, otypes = [np.float], cache = False) # calculate best fit params gamma_best = vectfunc(gam, prevgamma, prevdiff, sampling, rhatnorm) return [lsigma_best, rsigma_best, gamma_best] def func(gam, prevgamma, prevdiff, sampling, rhatnorm): while(gam < 10): r_gam = tgamma(2/gam) * tgamma(2/gam) / (tgamma(1/gam) * tgamma(3/gam)) diff = abs(r_gam - rhatnorm) if(diff > prevdiff): break prevdiff = diff prevgamma = gam gam += sampling gamma_best = prevgamma return gamma_best def compute_features(img): scalenum = 2 feat = [] # make a copy of the image im_original = img.copy() # scale the images twice for itr_scale in range(scalenum): im = im_original.copy() # normalize the image im = im / 255.0 # calculating MSCN coefficients mu = cv2.GaussianBlur(im, (7, 7), 1.166) mu_sq = mu * mu sigma = cv2.GaussianBlur(im*im, (7, 7), 1.166) sigma = (sigma - mu_sq)**0.5 # structdis is the MSCN image structdis = im - mu structdis /= (sigma + 1.0/255) # calculate best fitted parameters from MSCN image 
best_fit_params = AGGDfit(structdis) # unwrap the best fit parameters lsigma_best = best_fit_params[0] rsigma_best = best_fit_params[1] gamma_best = best_fit_params[2] # append the best fit parameters for MSCN image feat.append(gamma_best) feat.append((lsigma_best*lsigma_best + rsigma_best*rsigma_best)/2) # shifting indices for creating pair-wise products shifts = [[0,1], [1,0], [1,1], [-1,1]] # H V D1 D2 for itr_shift in range(1, len(shifts) + 1): OrigArr = structdis reqshift = shifts[itr_shift-1] # shifting index # create transformation matrix for warpAffine function M = np.float32([[1, 0, reqshift[1]], [0, 1, reqshift[0]]]) ShiftArr = cv2.warpAffine(OrigArr, M, (structdis.shape[1], structdis.shape[0])) Shifted_new_structdis = ShiftArr Shifted_new_structdis = Shifted_new_structdis * structdis # shifted_new_structdis is the pairwise product # best fit the pairwise product best_fit_params = AGGDfit(Shifted_new_structdis) lsigma_best = best_fit_params[0] rsigma_best = best_fit_params[1] gamma_best = best_fit_params[2] constant = m.pow(tgamma(1/gamma_best), 0.5)/m.pow(tgamma(3/gamma_best), 0.5) meanparam = (rsigma_best - lsigma_best) * (tgamma(2/gamma_best)/tgamma(1/gamma_best)) * constant # append the best fit calculated parameters feat.append(gamma_best) # gamma best feat.append(meanparam) # mean shape feat.append(m.pow(lsigma_best, 2)) # left variance square feat.append(m.pow(rsigma_best, 2)) # right variance square # resize the image on next iteration im_original = cv2.resize(im_original, (0,0), fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC) return feat def img_MeasureImageQuality(dataset_directory): """ #################################################################################### #Input - img_path #Output - Quality index of input image #Defination - function to calculate BRISQUE quality score in range of 0 and 100 [0:good;100:bad] #################################################################################### """ imgfile_dict = {} for file in os.listdir(dataset_directory): if (file.endswith(".jfif") or file.endswith(".png") or file.endswith(".jpg") or file.endswith(".jpeg")): filename = os.path.join(dataset_directory , file) if os.path.isfile(filename)==False: sys.exit() file_extension = os.path.splitext(filename)[1] if file_extension==".jfif": extension=".jfif" if file_extension==".png": extension=".png" if file_extension==".jpg": extension=".jpg" if file_extension==".jpeg": extension=".jpeg" if (extension not in [".jpg",".jpeg",".jfif",".png"]): sys.exit() try: # read image from given path dis = cv2.imread(filename, 1) if(dis is None): sys.exit(0) # convert to gray scale dis = cv2.cvtColor(dis, cv2.COLOR_BGR2GRAY) # compute feature vectors of the image features = compute_features(dis) # rescale the brisqueFeatures vector from -1 to 1 x = [0] # pre loaded lists from C++ Module to rescale brisquefeatures vector to [-1, 1] min_= [0.336999 ,0.019667 ,0.230000 ,-0.125959 ,0.000167 ,0.000616 ,0.231000 ,-0.125873 ,0.000165 ,0.000600 ,0.241000 ,-0.128814 ,0.000179 ,0.000386 ,0.243000 ,- 0.133080 ,0.000182 ,0.000421 ,0.436998 ,0.016929 ,0.247000 ,-0.200231 ,0.000104 ,0.000834 ,0.257000 ,-0.200017 ,0.000112 ,0.000876 ,0.257000 ,-0.155072 , 0.000112 ,0.000356 ,0.258000 ,-0.154374 ,0.000117 ,0.000351] max_= [9.999411, 0.807472, 1.644021, 0.202917, 0.712384, 0.468672, 1.644021, 0.169548, 0.713132, 0.467896, 1.553016, 0.101368, 0.687324, 0.533087, 1.554016, 0.101000 , 0.689177, 0.533133, 3.639918, 0.800955, 1.096995, 0.175286, 0.755547, 0.399270, 1.095995, 0.155928, 0.751488, 0.402398, 
1.041992, 0.093209, 0.623516, 0.532925, 1.042992, 0.093714, 0.621958, 0.534484] # append the rescaled vector to x for i in range(0, 36): min = min_[i] max = max_[i] x.append(-1 + (2.0/(max - min) * (features[i] - min))) modelPath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'allmodel.txt') # load model model = svmutil.svm_load_model(modelPath) # create svm node array from python list x, idx = gen_svm_nodearray(x[1:], isKernel=(model.param.kernel_type == PRECOMPUTED)) x[36].index = -1 # set last index to -1 to indicate the end. # get important parameters from model svm_type = model.get_svm_type() is_prob_model = model.is_probability_model() nr_class = model.get_nr_class() if svm_type in (ONE_CLASS, EPSILON_SVR, NU_SVC): # here svm_type is EPSILON_SVR as it's regression problem nr_classifier = 1 dec_values = (c_double * nr_classifier)() # calculate the quality score of the image using the model and svm_node_array qualityscore = svmutil.libsvm.svm_predict_probability(model, x, dec_values) imgfile_dict[file] = round(qualityscore,2) #print ("Quality Score of the given image is: ", qualityscore, "[0:Good;100:Bad]") except: pass finally: warnings.simplefilter("ignore") #print(imgfile_dict) return imgfile_dict # calculate moode def mode(arr): if arr==[]: return None else: return max(set(arr), key=arr.count) def img_EDA(dataset_directory): """ #################################################################################### #Input - dataset_directory with all type of Images #Output - mean,median and mode image size, channels type, extensions, recommendation of images etc #Defination - img_EDA takes the all images and print the EDA results #################################################################################### """ imgeda_dict = {} # check input directory if os.path.isdir(dataset_directory)==False: print("folder does not exist") sys.exit() width_list=[] height_list=[] k=[] c=[] cnum=[] v=[] ext=[] cnt=0 for item in os.listdir(dataset_directory): if (item.endswith(".jfif") or item.endswith(".png") or item.endswith(".jpg") or item.endswith(".jpeg")): if os.path.isfile(os.path.join(dataset_directory , item)): im = Image.open(os.path.join(dataset_directory , item)) c.append(im.mode) cnum.append(len(im.mode)) width_list.append(im.width) height_list.append(im.height) k.append(im.size) v.append(im.width*im.height) f, e = os.path.splitext(os.path.join(dataset_directory , item)) ext.append(e) cnt=cnt+1 # calculate biggest and smallest image img_dict={} for key, val in zip(k, v): img_dict[key] = val max_key = max(img_dict, key=img_dict.get) #max_key min_key = min(img_dict, key=img_dict.get) #min_key imgeda_dict['Channels'] = set(c) imgeda_dict['Extensions'] = set(ext) imgeda_dict['Total_Images'] = cnt imgeda_dict['Smallest_Image'] = min_key imgeda_dict['Largest_Image'] = max_key imgeda_dict['Mean_Width'] = int(np.mean(width_list)) imgeda_dict['Mean_Height'] = int(np.mean(height_list)) imgeda_dict['Median_Width'] = int(np.median(width_list)) imgeda_dict['Median_Height'] = int(np.median(height_list)) imgeda_dict['Mode_Width'] = int(mode(width_list)) imgeda_dict['Mode_Height'] = int(mode(height_list)) imgeda_dict['Recomended_Mean_Width_Height'] = (int(np.mean(width_list)),int(np.mean(height_list))) imgeda_dict['Recomended_Median_Width_Height'] = (int(np.median(width_list)),int(np.median(height_list))) imgeda_dict['Recomended_Mode_Width_Height'] = (int(mode(width_list)),int(mode(height_list))) imgeda_dict['Size_Distribution'] = dict(Counter(k).items()) imgeda_dict['Channel_Mean'] = 
np.mean(cnum) imgeda_dict['Channel_Standard_Deviation'] = np.std(cnum) ''' print('*-----------------------<<< RESULTS >>>-------------------------*') print() print('%-30s | ' % 'Channels', set(c)) print('%-30s | ' % 'Extensions', set(ext)) print('*---------------------------------------------------------------*') print('%-30s | ' % 'Total Images', cnt) print('%-30s | ' % 'Smallest Image', min_key) print('%-30s | ' % 'Largest Image', max_key) print('*---------------------------------------------------------------*') print('%-30s | ' % 'Mean Width', int(np.mean(width_list))) print('%-30s | ' % 'Mean Height', int(np.mean(height_list))) print('*---------------------------------------------------------------*') print('%-30s | ' % 'Median Width', int(np.median(width_list))) print('%-30s | ' % 'Median Height', int(np.median(height_list))) print('*---------------------------------------------------------------*') print('%-30s | ' % 'Mode Width', int(mode(width_list))) print('%-30s | ' % 'Mode Height', int(mode(height_list))) print('*---------------------------------------------------------------*') print('%-30s | ' % 'recommended size by mean(w,h)',(int(np.mean(width_list)),int(np.mean(height_list)))) print('*---------------------------------------------------------------*') print('%-30s | ' % 'recommended size by median(w,h)',(int(np.median(width_list)),int(np.median(height_list)))) print('*---------------------------------------------------------------*') print('%-30s | ' % 'recommended size by mode(w,h)',(int(mode(width_list)),int(mode(height_list)))) print('*---------------------------------------------------------------*') print('%-30s | ' % 'distribution of sizes',dict(Counter(k).items()) ) print('*---------------------------------------------------------------*') print('%-30s | ' % 'channel mean',np.mean(cnum)) print('%-30s | ' % 'channel standard deviation',np.std(cnum)) ''' #print(imgeda_dict) return imgeda_dict def dhash(image, hashSize=8): # convert the image to grayscale and resize the grayscale image, # adding a single column (width) so we can compute the horizontal # gradient gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) resized = cv2.resize(gray, (hashSize + 1, hashSize)) # compute the (relative) horizontal gradient between adjacent # column pixels diff = resized[:, 1:] > resized[:, :-1] # convert the difference image to a hash and return it return sum([2 ** i for (i, v) in enumerate(diff.flatten()) if v]) def img_duplicatefinder(dataset_directory): # grab the paths to all images in our input dataset directory and # then initialize our hashes dictionary print("[INFO] computing image hashes...") imagePaths = list(paths.list_images(dataset_directory)) hashes = {} duplimg_list = [] remove_file = 0 # loop over our image paths for imagePath in imagePaths: # load the input image and compute the hash image = cv2.imread(imagePath) h = dhash(image) # grab all image paths with that hash, add the current image # path to it, and store the list back in the hashes dictionary p = hashes.get(h, []) p.append(imagePath) hashes[h] = p # loop over the image hashes for (h, hashedPaths) in hashes.items(): # check to see if there is more than one image with the same hash if len(hashedPaths) > 1: #print(hashedPaths) duplimg_list.append(hashedPaths) return duplimg_list def img_plot_colour_hist(dataset_directory): import io, base64, urllib red_values = []; green_values = []; blue_values = []; all_channels = [] imagePaths = list(paths.list_images(dataset_directory)) for imagePath in imagePaths: img = 
np.array(Image.open(imagePath)) red_values.append(np.mean(img[:, :, 0])) green_values.append(np.mean(img[:, :, 1])) blue_values.append(np.mean(img[:, :, 2])) all_channels.append(np.mean(img)) _, axes = plt.subplots(ncols=4, nrows=1, constrained_layout=True, figsize=(16, 3), sharey=True) for ax, column, vals, c in zip( axes, ['red', 'green', 'blue', 'all colours'], [red_values, green_values, blue_values, all_channels], 'rgbk' ): ax.hist(vals, bins=100, color=c) ax.set_title(f'{column} hist') plt.suptitle("Image Dataset Colour Distribution") buf = io.BytesIO() plt.savefig(buf, format='png') buf.seek(0) string = base64.b64encode(buf.read()) uri = 'data:image/png;base64,' + urllib.parse.quote(string) return uri
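# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original sources): img_duplicatefinder()
# above groups images by a difference hash (dhash) -- resize the grayscale
# image to (hashSize+1, hashSize), compare neighbouring columns, and pack the
# boolean gradient into an integer. The snippet below applies the same idea to
# in-memory arrays so the grouping behaviour can be checked without a dataset
# directory; the three synthetic test images are assumptions of this example.
import numpy as np
import cv2

def dhash_demo(image_bgr, hashSize=8):
    gray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY)
    resized = cv2.resize(gray, (hashSize + 1, hashSize))
    diff = resized[:, 1:] > resized[:, :-1]            # horizontal gradient
    return sum(2 ** i for i, v in enumerate(diff.flatten()) if v)

rng = np.random.default_rng(0)
img_a = rng.integers(0, 256, (32, 32, 3), dtype=np.uint8)
img_b = img_a.copy()                                    # exact duplicate of img_a
img_c = rng.integers(0, 256, (32, 32, 3), dtype=np.uint8)

hashes = {}
for name, img in [("a.png", img_a), ("b.png", img_b), ("c.png", img_c)]:
    hashes.setdefault(dhash_demo(img), []).append(name)

duplicates = [group for group in hashes.values() if len(group) > 1]
print(duplicates)                                       # expected: [['a.png', 'b.png']]
# ---------------------------------------------------------------------------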
predict.py
'''
 *
 * =============================================================================
 * COPYRIGHT NOTICE
 * =============================================================================
 * @ Copyright HCL Technologies Ltd. 2021, 2022,2023
 * Proprietary and confidential. All information contained herein is, and
 * remains the property of HCL Technologies Limited. Copying or reproducing the
 * contents of this file, via any medium is strictly prohibited unless prior
 * written permission is obtained from HCL Technologies Limited.
 *
'''
import os
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import to_categorical
from keras.preprocessing import image
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.layers import Input
from keras.models import Model
from keras.optimizers import Adam
from keras.applications import VGG16
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.metrics import classification_report,confusion_matrix,precision_recall_curve
import seaborn as sns

def PredictCAST(test_csv, test_dataset_directory, load_model_dir, model_name, hparams_config_file):
    hyperparam_config = hparams_config_file['img_classifier']
    print("[Info] loading imagenet weights...")
    #baseModel = keras.applications.ResNet101(weights="imagenet", include_top=False, input_tensor=Input(shape=(128, 128, 3)))
    if model_name == "densenet":
        print('Loading Densenet model')
        baseModel = keras.applications.DenseNet121(weights="imagenet", include_top=False, input_tensor=Input(shape=(hyperparam_config['img_width'],hyperparam_config['img_height'],hyperparam_config['img_channel']))) #98
    elif model_name == "inception":
        print('Loading Inception model')
        baseModel = keras.applications.InceptionV3(weights="imagenet", include_top=False, input_tensor=Input(shape=(hyperparam_config['img_width'],hyperparam_config['img_height'],hyperparam_config['img_channel']))) #97
    headModel = baseModel.output
    headModel = Flatten(name="flatten")(headModel)
    headModel = Dense(1024, activation='relu')(headModel)
    headModel = Dropout(0.5)(headModel)
    headModel = Dense(2, activation='sigmoid')(headModel)
    model = Model(inputs=baseModel.input, outputs=headModel)
    print("[INFO] compiling model...")
    opt = Adam(lr=hyperparam_config['lr'])
    model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
    model.load_weights(os.path.join(load_model_dir, model_name))
    #model.load_weights(load_model_dir)
    test_image = []
    for i in range(test_csv.shape[0]):
        img = image.load_img(test_dataset_directory + '/' + str(test_csv['file_name'][i]), target_size=(hyperparam_config['img_width'],hyperparam_config['img_height'],hyperparam_config['img_channel']), grayscale=False)
        img = image.img_to_array(img)
        img = img/255
        test_image.append(img)
    test_images = np.array(test_image)
    test_labels = test_csv['class'].values
    test_labels = to_categorical(test_labels)
    # making predictions
    prediction = model.predict(test_images)
    prediction = np.argmax(prediction,axis=1)
    print('Classification Report : ')
    print(classification_report(test_csv['class'],prediction))
    sns.heatmap(confusion_matrix(test_csv['class'],prediction),annot=True)
    plt.show()
    print('Confusion matrix : ')
    print(confusion_matrix(test_csv['class'],prediction))
    print("[INFO] Evaluating model accuracy and loss...Take some moment...")
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    print('\nTest accuracy:', test_acc)
    print('\nTest loss:', test_loss)
    print("Prediction Completed...")
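# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original sources): after predicting,
# PredictCAST() above reduces the sigmoid scores to class indices with argmax
# and reports a classification report plus confusion matrix. The few lines
# below isolate exactly that post-processing on made-up scores and labels,
# which is handy when checking the reporting step without loading any weights.
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix

y_true = np.array([0, 1, 1, 0, 1, 0])                      # ground-truth classes
scores = np.array([[0.9, 0.1], [0.2, 0.8], [0.4, 0.6],     # model.predict()-style output
                   [0.7, 0.3], [0.1, 0.9], [0.3, 0.7]])
y_pred = np.argmax(scores, axis=1)

print(classification_report(y_true, y_pred))
print(confusion_matrix(y_true, y_pred))
# ---------------------------------------------------------------------------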
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
predict_single.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import keras from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.utils import to_categorical from keras.preprocessing import image import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from keras.utils import to_categorical from keras.layers import Input from keras.models import Model from keras.optimizers import Adam from keras.applications import VGG16 from tensorflow.keras.callbacks import EarlyStopping from sklearn.metrics import classification_report,confusion_matrix,precision_recall_curve import seaborn as sns import cv2 def PredictCAST(test_image, load_model_dir, model_name, hparams_config_file): hyperparam_config = hparams_config_file['img_classifier'] print("[Info] loading imagenet weights...") #baseModel = keras.applications.ResNet101(weights="imagenet", include_top=False, input_tensor=Input(shape=(128, 128, 3))) if model_name == "densenet": print('Loading Densenet model') baseModel = keras.applications.DenseNet121(weights="imagenet", include_top=False, input_tensor=Input(shape=(hyperparam_config['img_width'],hyperparam_config['img_height'],hyperparam_config['img_channel']))) #98 elif model_name == "inception": print('Loading Inception model') baseModel = keras.applications.InceptionV3(weights="imagenet", include_top=False, input_tensor=Input(shape=(hyperparam_config['img_width'],hyperparam_config['img_height'],hyperparam_config['img_channel']))) #97 headModel = baseModel.output headModel = Flatten(name="flatten")(headModel) headModel = Dense(1024, activation='relu')(headModel) headModel = Dropout(0.5)(headModel) headModel = Dense(2, activation='sigmoid')(headModel) model = Model(inputs=baseModel.input, outputs=headModel) print("[INFO] compiling model...") opt = Adam(lr=hyperparam_config['lr']) model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"]) model.load_weights(os.path.join(load_model_dir, model_name)) img = cv2.imread(test_image) img = cv2.resize(img, (hyperparam_config['img_width'],hyperparam_config['img_height'])) orig = img.copy() img = image.img_to_array(img) img = np.expand_dims(img, axis=0) img = img/255 print("[Info] predicting output") #prediction = model.predict_classes(img) prediction = model.predict(img) prediction = np.argmax(prediction,axis=1) print(prediction) if (prediction<0.5): print("def_front") cv2.putText(orig, "def_front", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2) else: print("ok_front") cv2.putText(orig, "ok_front", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2) plt.imshow(orig) plt.axis('off') plt.show() print("Prediction Completed...")
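# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original sources): PredictCAST() above
# prepares a single frame for the network by resizing to the configured
# width/height, scaling pixel values to [0, 1] and adding a batch dimension
# before model.predict(). This reproduces that preprocessing on a synthetic
# BGR frame; the 128x128 target size is an assumption, not a product default.
import numpy as np
import cv2

frame = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)   # stand-in for cv2.imread(...)
img_w, img_h = 128, 128

img = cv2.resize(frame, (img_w, img_h))
img = img.astype("float32") / 255.0
batch = np.expand_dims(img, axis=0)
print(batch.shape, float(batch.min()), float(batch.max()))         # (1, 128, 128, 3), values in [0, 1]
# ---------------------------------------------------------------------------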
incMachineLearning.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import warnings warnings.filterwarnings('ignore') import logging import sklearn from random import sample from numpy.random import uniform import numpy as np import math import pickle import os import json from math import isnan from sklearn.preprocessing import binarize from sklearn.preprocessing import LabelEncoder import pandas as pd from sklearn.preprocessing import LabelBinarizer from sklearn.model_selection import train_test_split from incremental.incClassificationModel import incClassifierModel from incremental.incRegressionModel import incRegressionModel class incMachineLearning(object): def __init__(self,mlobj): self.features=[] self.mlobj=mlobj self.log = logging.getLogger('eion') def startLearning(self,mlconfig,modelType,modelParams,modelList,scoreParam,features,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,targetType,deployLocation,iterName,iterVersion,trained_data_file,predicted_data_file,labelMaps): model = 'None' params = 'None' score = 0xFFFF estimator = None model_tried = '' threshold = -1 pscore = -1 rscore = -1 topics = {} if(targetColumn != ''): targetData = dataFrame[targetColumn] datacolumns=list(dataFrame.columns) if targetColumn in datacolumns: datacolumns.remove(targetColumn) scoreParam = self.mlobj.setScoreParams(scoreParam,modelType,categoryCountList) self.log.info('\n-------------- Training ML: Start --------------') model_type,model,params, score, estimator,model_tried,xtrain,ytrain,xtest,ytest,threshold,pscore,rscore,method,incObj=self.startLearnerModule(mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps) self.log.info('-------------- Training ML: End --------------\n') filename = os.path.join(deployLocation,'production','model',model+'.pkl') saved_model = model+'.pkl' pickle.dump(estimator, open(filename, 'wb')) df_test = xtest.copy() df_test.reset_index(inplace = True,drop=True) trainPredictedData = incObj.bestTrainPredictedData predictedData = incObj.bestPredictedData try: if(model_type == 'Classification'): self.log.info('\n--------- Performance Matrix with Train Data ---------') train_matrix = self.mlobj.getClassificationPerformaceMatrix(ytrain,trainPredictedData,labelMaps) self.log.info('--------- Performance Matrix with Train Data End ---------\n') self.log.info('\n--------- Performance Matrix with Test Data ---------') performancematrix = self.mlobj.getClassificationPerformaceMatrix(ytest,predictedData,labelMaps) ytest.reset_index(inplace=True,drop=True) df_test['actual'] = ytest df_test['predict'] = predictedData self.log.info('--------- Performance Matrix with Test Data End ---------\n') matrix = performancematrix elif(model_type == 'Regression'): self.log.info('\n--------- Performance Matrix with Train Data ---------') train_matrix = self.mlobj.get_regression_matrix(ytrain, trainPredictedData) self.log.info('--------- Performance Matrix with Train Data End 
---------\n') self.log.info('\n--------- Performance Matrix with Test Data ---------') matrix = self.mlobj.get_regression_matrix(ytest, predictedData) ytest.reset_index(inplace=True, drop=True) df_test['actual'] = ytest df_test['predict'] = predictedData self.log.info('--------- Performance Matrix with Test Data End ---------\n') except Exception as Inst: self.log.info('--------- Error Performance Matrix ---------\n') self.log.info(str(Inst)) df_test['predict'] = predictedData matrix = "" train_matrix = "" self.log.info('--------- Performance Matrix with Test Data End ---------\n') df_test.to_csv(predicted_data_file) return 'Success',model_type,model,saved_model,matrix,train_matrix,xtrain.shape,model_tried,score,filename,self.features,threshold,pscore,rscore,method,estimator,xtrain,ytrain,xtest,ytest,topics,params def startLearnerModule(self,mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps): matrix = '' threshold = -1 pscore = -1 rscore = -1 datacolumns=list(xtrain.columns) if targetColumn in datacolumns: datacolumns.remove(targetColumn) self.features =datacolumns self.log.info('-------> Features Used For Training the Model: '+(str(self.features))[:500]) xtrain = xtrain[self.features] xtest = xtest[self.features] method = mlconfig['optimizationMethod'] method = method.lower() geneticParam = '' optimizationHyperParameter = mlconfig['optimizationHyperParameter'] cvSplit = optimizationHyperParameter['trainTestCVSplit'] nIter = int(optimizationHyperParameter['iterations']) if(method.lower() == 'genetic'): geneticParam = optimizationHyperParameter['geneticparams'] scoreParam = scoreParam if 'thresholdTunning' in mlconfig: thresholdTunning = mlconfig['thresholdTunning'] else: thresholdTunning = 'NA' if cvSplit == "": cvSplit =None else: cvSplit =int(cvSplit) if modelType == 'classification': model_type = "Classification" MakeFP0 = False MakeFN0 = False if(len(categoryCountList) == 2): if(thresholdTunning.lower() == 'fp0'): MakeFP0 = True elif(thresholdTunning.lower() == 'fn0'): MakeFN0 = True noOfClasses= len(labelMaps) incObjClf = incClassifierModel(noOfClasses,modelList, modelParams, scoreParam, cvSplit, nIter,geneticParam, xtrain,ytrain,xtest,ytest,method,modelType,MakeFP0,MakeFN0,deployLocation) model, params, score, estimator,model_tried,threshold,pscore,rscore = incObjClf.firstFit() incObj = incObjClf elif modelType == 'regression': model_type = "Regression" incObjReg = incRegressionModel(modelList, modelParams, scoreParam, cvSplit, nIter,geneticParam, xtrain,ytrain,xtest,ytest,method,deployLocation) model,params,score,estimator,model_tried = incObjReg.firstFit() incObj = incObjReg return model_type,model,params, score, estimator,model_tried,xtrain,ytrain,xtest,ytest,threshold,pscore,rscore,method, incObj
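# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original sources): startLearnerModule()
# above delegates the actual fitting to river-based online learners
# (incClassifierModel / incRegressionModel), which consume the training data
# one record at a time instead of in a single batch. The loop below shows that
# generic learn-one / predict-one pattern with river's bundled Phishing dataset
# and a scaler + logistic regression pipeline; it is the standard river usage
# pattern, not the exact pipeline assembled by riverML.
from river import datasets, linear_model, preprocessing, metrics

model = preprocessing.StandardScaler() | linear_model.LogisticRegression()
metric = metrics.Accuracy()

for x, y in datasets.Phishing():      # streams (feature-dict, bool) pairs
    y_pred = model.predict_one(x)     # predict before seeing the label
    metric.update(y, y_pred)          # prequential (test-then-train) evaluation
    model.learn_one(x, y)             # incremental update

print(metric)                         # roughly 0.89 accuracy on this dataset
# ---------------------------------------------------------------------------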
incRegressionModel.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' from learner.optimizetechnique import OptimizationTq from learner.parameters import parametersDefine import time import logging import os import sys from sklearn.metrics import r2_score from sklearn.metrics import mean_absolute_error,make_scorer from sklearn.metrics import mean_squared_error from learner.aion_matrix import aion_matrix class incRegressionModel(): def __init__(self,modelList,params,scoreParam,cvSplit,numIter,geneticParam,trainX,trainY,testX,testY,method,deployLocation): self.modelList =modelList self.params =params self.trainX =trainX self.trainY =trainY self.testX = testX self.testY = testY self.method =method self.scoreParam=scoreParam self.cvSplit=cvSplit self.numIter=numIter self.geneticParam=geneticParam self.log = logging.getLogger('eion') self.deployLocation = deployLocation self.bestTrainPredictedData = None self.bestPredictedData = None self.AlgorithmNames={'Online Linear Regression':'Online Linear Regression', 'Online Decision Tree Regressor':'Online Decision Tree Regressor', 'Online KNN Regressor':'Online KNN Regressor'} self.modelToAlgoNames = {value: key for key, value in self.AlgorithmNames.items()} def firstFit(self): bestModel='' bestParams={} import sys bestScore=-sys.float_info.max #bugfix 11656 scoredetails = '' self.log.info('\n---------- Regression Model has started ----------') try: for modelName in self.modelList: if modelName not in self.params: continue paramSpace=self.params[modelName] algoName = self.AlgorithmNames[modelName] from incremental.riverML import riverML riverMLObj = riverML() self.log.info("-------> Model Name: "+str(modelName)) start = time.time() model, modelParams, estimator, trainPredictedData = riverMLObj.startLearn('regression',algoName,paramSpace,self.trainX, self.trainY) modelParams = str(modelParams) executionTime=time.time() - start self.log.info('---------> Total Execution: '+str(executionTime)) predictedData = riverMLObj.getPrediction(estimator,self.testX) if 'neg_mean_squared_error' in self.scoreParam: meanssquatederror = mean_squared_error(self.testY,predictedData) score = meanssquatederror elif 'neg_root_mean_squared_error' in self.scoreParam: rootmeanssquatederror=mean_squared_error(self.testY,predictedData,squared=False) score = rootmeanssquatederror elif 'neg_mean_absolute_error' in self.scoreParam: meanabsoluteerror=mean_absolute_error(self.testY,predictedData) score = meanabsoluteerror elif 'r2' in self.scoreParam: r2score=r2_score(self.testY,predictedData) score = round(r2score*100, 2) if self.scoreParam == "r2": if score>bestScore: bestScore =score bestModel =model bestParams=modelParams bestEstimator=estimator self.bestTrainPredictedData = trainPredictedData self.bestPredictedData = predictedData else: if abs(score) < bestScore or bestScore == -sys.float_info.max: bestScore =abs(score) bestModel =model bestParams=modelParams bestEstimator=estimator self.bestTrainPredictedData = trainPredictedData self.bestPredictedData = predictedData metrices = {} metrices["score"] = score if(scoredetails 
!= ''): scoredetails += ',' scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","Score":'+str(abs(score))+'}' self.log.info('Status:- |... ML Algorithm applied: '+modelName) self.log.info("Status:- |... Testing Score: "+str(score)) self.log.info('---------- Regression Model End ---------- \n') self.log.info('\n------- Best Model and its parameters -------------') self.log.info('Status:- |... Best Algorithm selected: '+str(self.modelToAlgoNames[bestModel])+' Score='+str(round(bestScore,2))) self.log.info("-------> Best Name: "+str(bestModel)) self.log.info("-------> Best Score: "+str(bestScore)) return self.modelToAlgoNames[bestModel],bestParams,bestScore,bestEstimator,scoredetails except Exception as inst: self.log.info( '\n-----> regressionModel failed!!!.'+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
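# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original sources): firstFit() above
# keeps the best online regressor with two different rules -- for "r2" a
# larger score wins, while for the error metrics (neg_mean_absolute_error,
# neg_mean_squared_error, neg_root_mean_squared_error) the smaller absolute
# error wins, starting from -sys.float_info.max. The helper below isolates
# that selection rule so it can be reasoned about (or unit-tested) on its own;
# the candidate list is made up for the demonstration.
import sys

def pick_best(candidates, score_param):
    """candidates: list of (model_name, score); returns (best_name, best_score)."""
    best_name, best_score = '', -sys.float_info.max
    for name, score in candidates:
        if score_param == 'r2':
            better = score > best_score
        else:  # error-based metrics: smaller absolute value is better
            better = abs(score) < best_score or best_score == -sys.float_info.max
        if better:
            best_name = name
            best_score = score if score_param == 'r2' else abs(score)
    return best_name, best_score

print(pick_best([('Online Linear Regression', 3.2),
                 ('Online KNN Regressor', 2.7)], 'neg_mean_absolute_error'))
# -> ('Online KNN Regressor', 2.7)
# ---------------------------------------------------------------------------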
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
incClassificationModel.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import time import os import sys import numpy as np from sklearn.metrics import confusion_matrix from sklearn.metrics import recall_score from sklearn.metrics import precision_score from sklearn.preprocessing import binarize from learner.optimizetechnique import OptimizationTq from learner.parameters import parametersDefine import logging from learner.aion_matrix import aion_matrix # apply threshold to positive probabilities to create labels def to_labels(pos_probs, threshold): return (pos_probs >= threshold).astype('int') class incClassifierModel(): def __init__(self,noOfClasses,modelList,params,scoreParam,cvSplit,numIter,geneticParam,trainX,trainY,testX,testY,method,modelType,MakeFP0,MakeFN0,deployLocation): self.noOfClasses = noOfClasses self.modelList =modelList self.params =params self.trainX =trainX self.X =trainX self.trainY =trainY self.testX = testX self.testY = testY self.method =method self.scoreParam=scoreParam self.cvSplit=cvSplit self.numIter=numIter self.geneticParam=geneticParam self.MakeFP0= MakeFP0 self.MakeFN0=MakeFN0 self.log = logging.getLogger('eion') self.modelType = modelType self.deployLocation = deployLocation self.isRiverModel = False self.AlgorithmNames={'Online Logistic Regression':'Online Logistic Regression', 'Online Softmax Regression':'Online Softmax Regression', 'Online Decision Tree Classifier':'Online Decision Tree Classifier', 'Online KNN Classifier':'Online KNN Classifier'} self.modelToAlgoNames = {value: key for key, value in self.AlgorithmNames.items()} def check_threshold(self,estimator,testX,testY,threshold_range,checkParameter,modelName): thresholdx = -1 for threshold in threshold_range: predictedData = estimator.predict_proba(testX) predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold)#bug 12437 p_score = precision_score(testY, predictedData) r_score = recall_score(testY, predictedData) tn, fp, fn, tp = confusion_matrix(testY, predictedData).ravel() if(checkParameter.lower() == 'fp'): if fp == 0: if(p_score == 1): thresholdx = threshold self.log.info('---------------> Best Threshold:'+str(threshold)) self.log.info('---------------> Best Precision:'+str(p_score)) self.log.info('---------------> Best Recall:'+str(r_score)) self.log.info('---------------> TN:'+str(tn)) self.log.info('---------------> FP:'+str(fp)) self.log.info('---------------> FN:'+str(fn)) self.log.info('---------------> TP:'+str(tp)) break if(checkParameter.lower() == 'fn'): if fn == 0: if(r_score == 1): thresholdx = threshold self.log.info('---------------> Best Threshold:'+str(threshold)) self.log.info('---------------> Best Precision:'+str(p_score)) self.log.info('---------------> Best Recall:'+str(r_score)) self.log.info('---------------> TN:'+str(tn)) self.log.info('---------------> FP:'+str(fp)) self.log.info('---------------> FN:'+str(fn)) self.log.info('---------------> TP:'+str(tp)) break return(thresholdx,p_score,r_score) def 
getBestModel(self,fp0,fn0,threshold,bestthreshold,rscore,brscore,pscore,bpscore,tscore,btscore): cmodel = False if(threshold != -1): if(bestthreshold == -1): cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif fp0: if rscore > brscore: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif rscore == brscore: if tscore > btscore or btscore == -0xFFFF: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif fn0: if pscore > bpscore: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif pscore == bpscore: if tscore > btscore or btscore == -0xFFFF: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore else: if tscore > btscore or btscore == -0xFFFF: cmodel = True btscore = tscore else: if(bestthreshold == -1): if tscore > btscore or btscore == -0xFFFF: cmodel = True btscore = tscore return cmodel,btscore,bestthreshold,brscore,bpscore def firstFit(self): bestModel='None' bestParams={} bestScore=-0xFFFF bestEstimator = 'None' scoredetails = '' threshold = -1 bestthreshold = -1 precisionscore =-1 bestprecisionscore=-1 recallscore = -1 bestrecallscore=-1 self.bestTrainPredictedData = None self.bestPredictedData = None self.log.info('\n---------- ClassifierModel has started ----------') objClf = aion_matrix() try: for modelName in self.modelList: paramSpace=self.params[modelName] algoName = self.AlgorithmNames[modelName] from incremental.riverML import riverML riverMLObj = riverML() self.log.info("-------> Model Name: "+str(modelName)) start = time.time() model, modelParams, estimator, trainPredictedData = riverMLObj.startLearn('classification',algoName,paramSpace,self.trainX, self.trainY, self.noOfClasses) modelParams = str(modelParams) predictedData = riverMLObj.getPrediction(estimator,self.testX) executionTime=time.time() - start self.testY.reset_index(inplace=True, drop=True) score = objClf.get_score(self.scoreParam,self.testY.values.flatten(),predictedData.values.flatten()) self.log.info(str(score)) metrices = {} metrices["score"] = score threshold = -1 precisionscore = precision_score(self.testY, predictedData, average='macro') recallscore = recall_score(self.testY, predictedData, average='macro') self.log.info('---------> Total Execution: '+str(executionTime)) if(scoredetails != ''): scoredetails += ',' scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","Score":'+str(score)+'}' status,bscore,bthres,brscore,bpscore = self.getBestModel(self.MakeFP0,self.MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,score,bestScore) if status: bestScore =bscore bestModel =model bestParams=modelParams bestEstimator=estimator bestthreshold = threshold bestrecallscore = recallscore bestprecisionscore = precisionscore self.bestTrainPredictedData = trainPredictedData self.bestPredictedData = predictedData self.log.info('Status:- |... ML Algorithm applied: '+modelName) self.log.info("Status:- |... Testing Score: "+str(score)) self.log.info('---------- ClassifierModel End ---------- \n') self.log.info('\n------- Best Model and its parameters -------------') self.log.info('Status:- |... 
Best Algorithm selected: '+str(self.modelToAlgoNames[bestModel])+' Score='+str(round(bestScore,2))) self.log.info("-------> Best Name: "+str(bestModel)) self.log.info("-------> Best Score: "+str(bestScore)) return self.modelToAlgoNames[bestModel],bestParams,bestScore,bestEstimator,scoredetails,bestthreshold,bestprecisionscore,bestrecallscore except Exception as inst: self.log.info( '\n-----> ClassifierModel failed!!!.'+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
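# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original sources): check_threshold()
# above sweeps decision thresholds over predict_proba() output and keeps the
# first threshold that drives FP (or FN) to zero while precision (or recall)
# reaches 1.0. The snippet below runs that sweep on hand-made probabilities so
# the stopping condition is easy to inspect; the probability values and the
# 0.1 step are illustrative assumptions only.
import numpy as np
from sklearn.metrics import confusion_matrix, precision_score, recall_score
from sklearn.preprocessing import binarize

y_true   = np.array([0, 0, 0, 1, 1, 1])
pos_prob = np.array([0.20, 0.45, 0.55, 0.60, 0.80, 0.95])   # P(class == 1) per row

best_threshold = -1
for threshold in np.arange(0.1, 1.0, 0.1):
    y_pred = binarize(pos_prob.reshape(-1, 1), threshold=threshold).ravel().astype(int)
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    if fp == 0 and precision_score(y_true, y_pred) == 1.0:   # the 'fp0' rule
        best_threshold = threshold
        print(f"threshold={threshold:.1f} "
              f"precision={precision_score(y_true, y_pred):.2f} "
              f"recall={recall_score(y_true, y_pred):.2f}")
        break
print("best threshold:", round(best_threshold, 2))
# ---------------------------------------------------------------------------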
incProfiler.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' #System imports import logging import os import sys import pickle #Sci-Tools imports import numpy as np import pandas as pd from sklearn.preprocessing import LabelEncoder from scipy import stats from word2number import w2n #river imports from river.preprocessing import StatImputer from river import stats, compose, anomaly class incProfiler(): def __init__(self): self.DtypesDic={} self.pandasNumericDtypes=['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] self.allNumberTypeCols = [] #all number type columns self.allNumCols = [] #only numerical columns which includes num features and target if it is numerical self.allCatCols = [] self.numFtrs = [] self.catFtrs = [] self.textFtrs = [] self.textVectorFtrs = [] self.numDiscreteCols = [] self.numContinuousCols = [] self.wordToNumericFeatures=[] self.emptyCols=[] self.missingCols = [] self.targetColumn = "" self.le_dict = {} self.configDict = {} self.incFill = None self.incLabelMapping = None self.incCatEncoder = None self.incScaler = None self.incOutlierRem = None self.log = logging.getLogger('eion') def pickleDump(self, model, path): if model is not None: with open(path, 'wb') as f: pickle.dump(model, f) def saveProfilerModels(self, deployLocation): if isinstance(self.incFill['num_fill'], StatImputer) or isinstance(self.incFill['cat_fill'], StatImputer): self.pickleDump(self.incFill, os.path.join(deployLocation,'production','profiler','incFill.pkl')) self.pickleDump(self.incLabelMapping, os.path.join(deployLocation,'production','profiler','incLabelMapping.pkl')) self.pickleDump(self.incCatEncoder, os.path.join(deployLocation,'production','profiler','incCatEncoder.pkl')) self.pickleDump(self.incScaler, os.path.join(deployLocation,'production','profiler','incScaler.pkl')) self.pickleDump(self.incOutlierRem, os.path.join(deployLocation,'production','profiler','incOutlierRem.pkl')) def featureAnalysis(self, df, conf_json, targetFeature): try: self.log.info('-------> Remove Duplicate Rows') noofdplicaterows = df.duplicated(keep='first').sum() df = df.drop_duplicates(keep="first") df = df.reset_index(drop=True) self.log.info('Status:- |... 
Duplicate row treatment done: '+str(noofdplicaterows)) self.log.info(df.head(5)) self.log.info( '\n----------- Inspecting Features -----------') ctn_count = 0 df = df.replace('-', np.nan) df = df.replace('?', np.nan) dataFDtypes=self.dataFramecolType(df) numerical_ratio = float(conf_json['numericFeatureRatio']) categoricalMaxLabel = int(conf_json['categoryMaxLabel']) indexFeatures = [] numOfRows = df.shape[0] dataCols = df.columns for item in dataFDtypes: if(item[1] == 'object'): filteredDf,checkFlag = self.smartFilter(item[0],df,numerical_ratio) if(checkFlag): self.wordToNumericFeatures.append(item[0]) self.log.info('----------> Data Type Converting to numeric :Yes') try: df[item[0]]=filteredDf[item[0]].astype(float) except: pass ctn_count = ctn_count+1 else: count = (df[item[0]] - df[item[0]].shift() == 1).sum() if((numOfRows - count) == 1): self.log.info( '-------> Feature :'+str(item[0])) self.log.info('----------> Sequence Feature') indexFeatures.append(item[0]) self.configDict['wordToNumCols'] = self.wordToNumericFeatures self.configDict['emptyFtrs'] = indexFeatures self.log.info('Status:- |... Feature inspection done for numeric data: '+str(ctn_count)+' feature(s) converted to numeric') self.log.info('Status:- |... Feature word to numeric treatment done: '+str(self.wordToNumericFeatures)) self.log.info( '----------- Inspecting Features End -----------\n') except Exception as inst: self.log.info("Error in Feature inspection: "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) try: self.log.info('\n---------- Dropping Index features ----------') self.log.info('Index Features to remove '+str(indexFeatures)) if len(indexFeatures) > 0: dataCols = list(set(dataCols) - set(indexFeatures)) for empCol in indexFeatures: self.log.info('-------> Drop Feature: '+empCol) df = df.drop(columns=[empCol]) self.log.info('---------- Dropping Index features End----------\n') dataFDtypes=self.dataFramecolType(df) categoricalMaxLabel = int(conf_json['categoryMaxLabel']) for item in dataFDtypes: self.DtypesDic[item[0]] = item[1] nUnique=len(df[item[0]].unique().tolist()) if item[1] in self.pandasNumericDtypes: self.allNumberTypeCols.append(item[0]) if nUnique >= categoricalMaxLabel: self.allNumCols.append(item[0]) #pure numerical if item[1] in ['int16', 'int32', 'int64']: self.numDiscreteCols.append(item[0]) elif item[1] in ['float16', 'float32', 'float64']: self.numContinuousCols.append(item[0]) else: self.allCatCols.append(item[0]) elif item[1] != 'bool': if (nUnique >= categoricalMaxLabel) and targetFeature != item[0]: self.textFtrs.append(item[0]) else: col = item[0] if (max(df[col].astype(str).str.split().str.len()) > 10) and targetFeature != item[0]: self.textFtrs.append(item[0]) else: self.allCatCols.append(item[0]) else: self.allCatCols.append(item[0]) misval_ratio = float(conf_json['misValueRatio']) self.configDict['misval_ratio'] = misval_ratio missingCols, emptyCols = self.getMissingVals(df, dataCols, misval_ratio) if targetFeature in emptyCols: raise Exception('Target column '+str(targetFeature)+' cannot be empty') dataCols = list(set(dataCols) - set(emptyCols)) self.log.info('\n---------- Dropping empty features ----------') for empCol in emptyCols: self.log.info('-------> Drop Feature: '+empCol) df = df.drop(columns=[empCol]) self.log.info('---------- Dropping empty features End----------\n') self.log.info('Status:- |... 
Empty feature treatment done: '+str(len(emptyCols))+' empty feature(s) found') self.log.info('-------> Data Frame Shape After Dropping (Rows,Columns): '+str(df.shape)) self.allNumCols = list(set(self.allNumCols) - set(emptyCols)) self.allCatCols = list(set(self.allCatCols) - set(emptyCols)) self.textFtrs = list(set(self.textFtrs) - set(emptyCols)) missingValFtrs = list(set(missingCols) - set(emptyCols)) self.log.info(str(len(missingValFtrs))+' feature(s) found with missing value(s)') self.log.info('\n-------> Numerical continuous columns :'+(str(self.numContinuousCols))[:500]) self.log.info('-------> Numerical discrete columns :'+(str(self.numDiscreteCols))[:500]) self.log.info('-------> Non numerical columns :'+(str(self.allCatCols))[:500]) self.log.info('-------> Text columns :'+(str(self.textFtrs))[:500]) except Exception as inst: self.log.info("Error in segregating numerical and categorical columns: "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) return df, missingValFtrs, emptyCols, dataCols, self.allNumCols, self.allCatCols, self.textFtrs def createIncProfiler(self, df, conf_json, allNumCols, numFtrs, allCatCols, textFtrs, missingValFtrs): self.incLabelMapping = None catFtrs = allCatCols.copy() #LabelEncoding if self.targetColumn in allCatCols: catFtrs.remove(self.targetColumn) self.incLabelMapping = LabelEncoder() df[self.targetColumn] = df[self.targetColumn].apply(str) self.incLabelMapping.fit(df[self.targetColumn]) self.le_dict = dict(zip(self.incLabelMapping.classes_, self.incLabelMapping.transform(self.incLabelMapping.classes_))) self.log.info('----------> Encoded Values of Target Labels: '+(str(self.le_dict))[:500]) #self.incFill --> {num_fill:SI/0.0/'drop', cat_fill:SI/0.0/'drop'} #fill self.incFill = {} self.incCatEncoder = None self.incScaler = None self.incOutlierRem = None num_fill_method = 'Mean' for x in list(conf_json['numericalFillMethod'].keys()): if conf_json['numericalFillMethod'][x] == 'True': num_fill_method = x break if num_fill_method.lower() =='mean': num_fill = [(col, stats.Mean()) for col in allNumCols] self.incFill['num_fill'] = StatImputer(*num_fill) elif num_fill_method.lower() =='min': num_fill = [(col, stats.Min()) for col in allNumCols] self.incFill['num_fill'] = StatImputer(*num_fill) elif num_fill_method.lower() == 'max': num_fill = [(col, stats.Max()) for col in allNumCols] self.incFill['num_fill'] = StatImputer(*num_fill) elif num_fill_method.lower() =='zero': self.incFill['num_fill'] = 'zero' elif num_fill_method.lower() =='drop': self.incFill['num_fill'] = 'drop' else: num_fill = [(col, stats.Mean()) for col in allNumCols] self.incFill['num_fill'] = StatImputer(*num_fill) cat_fill_method = 'Mode' for x in list(conf_json['categoricalFillMethod'].keys()): if conf_json['categoricalFillMethod'][x] == 'True': cat_fill_method = x break if cat_fill_method.lower() =='zero': self.incFill['cat_fill'] = 'zero' elif cat_fill_method.lower() == 'mode': cat_fill = [(col, stats.Mode()) for col in allCatCols] self.incFill['cat_fill'] = StatImputer(*cat_fill) elif cat_fill_method.lower() =='drop': self.incFill['cat_fill'] = 'drop' #CatEncoding for x in list(conf_json['categoryEncoding'].keys()): if conf_json['categoryEncoding'][x] == 'True': catEncoder = x break catEncHow = 'Mean' for x in list(conf_json['targetEncodingParams']['how'].keys()): if conf_json['targetEncodingParams']['how'][x] == 'True': catEncHow = x break if 
self.targetColumn in catFtrs: catFtrs.remove(self.targetColumn) if len(catFtrs) > 0: from river.feature_extraction import TargetAgg if catEncHow.lower() == 'mean': agg_stat = stats.Mean() if catEncHow.lower() == 'bayesianmean' or catEncHow.lower() == 'bayesian mean': agg_stat = stats.BayesianMean(prior=0.5, prior_weight=50) self.incCatEncoder = TargetAgg( by=catFtrs[0], how=agg_stat) for col in catFtrs[1:]: self.incCatEncoder += TargetAgg( by=col, how=agg_stat) self.incCatEncoder|= compose.Discard(*catFtrs) #Scaling normalization_status = 'False' normalization_method = "" if 'normalization' in conf_json: nor_supported_methods = conf_json['normalization'] for k in nor_supported_methods.keys(): if conf_json['normalization'][k].lower() == 'true': normalization_status='True' normalization_method =k break if normalization_status.lower() == "true" and len(numFtrs) > 0: from sklearn.preprocessing import MinMaxScaler, StandardScaler, MaxAbsScaler if self.targetColumn in numFtrs: numFtrs.remove(self.targetColumn) if normalization_method.lower() =='standardscaler': self.incScaler = StandardScaler() elif normalization_method.lower() =='minmaxscaler' or normalization_method.lower() =='minmax': self.incScaler = MinMaxScaler() elif normalization_method.lower() =='maxabsscaler' or normalization_method.lower() =='maxabs': self.incScaler = MaxAbsScaler() else: self.incScaler = None #OutlierRemoval outlier_status = 'False' outlier_method = 'None' for x in list(conf_json['outlierDetection'].keys()): if conf_json['outlierDetection'][x] == 'True': outlier_method = x outlier_status = 'True' break if outlier_status and numFtrs: outlierMethodNames = list(conf_json['outlierDetectionParams'].keys()) if outlier_method.lower() == 'oneclasssvm' or outlier_method.lower() == 'one class svm': for x in outlierMethodNames: if x[0].lower() == 'o': key = x break params = conf_json['outlierDetectionParams'][key] self.log.info('<--- one class SVM with quantile filter --->') self.incOutlierRem = anomaly.QuantileFilter(anomaly.OneClassSVM(nu=float(params['nu'])),q=float(params['q'])) elif outlier_method.lower() =='halfspacetrees' or outlier_method.lower() =='half space trees': for x in outlierMethodNames: if x[0].lower() == 'h': key = x break params = conf_json['outlierDetectionParams'][key] self.log.info('<--- Half space trees with quantile filter --->') self.incOutlierRem = anomaly.QuantileFilter(anomaly.HalfSpaceTrees(n_trees=int(params['n_trees']),height=int(params['height']), window_size=int(params['window_size'])) ,q=float(params['q'])) else: self.log.info("No method is provided for outlier analysis") def getMissingVals(self,dataframe,columns,misval_ratio): try: self.log.info( '\n----------- Detecting for Missing Values -----------') nonNAArray=[] numOfRows = dataframe.shape[0] for i in columns: numNa=dataframe.loc[(pd.isna(dataframe[i])),i ].shape[0] nonNAArray.append(tuple([i,numNa])) for item in nonNAArray: numofMissingVals = item[1] if(numofMissingVals !=0): self.log.info('-------> Feature '+str(item[0])) self.log.info('----------> Number of Empty Rows '+str(numofMissingVals)) self.missingCols.append(item[0]) if(numofMissingVals >= numOfRows * misval_ratio): self.log.info('----------> Empty: Yes') self.log.info('----------> Permitted Rows: '+str(int(numOfRows * misval_ratio))) self.emptyCols.append(item[0]) if(len(self.missingCols) !=0): self.log.info( '----------- Detecting for Missing Values End -----------\n') return self.missingCols, self.emptyCols else: self.log.info( '-------> Missing Value Features :Not Any') 
self.log.info( '----------- Detecting for Missing Values End -----------\n') return self.missingCols, self.emptyCols except Exception as e: self.log.info("getMissingVals failed ==>" +str(e)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) return None, None def startIncProfiler(self,df,conf_json,targetFeature,deployLocation,problemType): try: self.targetColumn = targetFeature df, missingValFtrs, emptyFtrs, dataFtrs, allNumCols, allCatCols, textFtrs = self.featureAnalysis(df, conf_json, self.targetColumn) if len(textFtrs)>0: self.log.info('Text Features are not supported. Dropping '+str(textFtrs)[:500]) df = df.drop(columns=textFtrs) catFtrs = allCatCols.copy() numFtrs = allNumCols.copy() if self.targetColumn in catFtrs: catFtrs.remove(self.targetColumn) if targetFeature in allNumCols: numFtrs.remove(targetFeature) self.configDict['targetCol'] = self.targetColumn self.configDict['numFtrs'] = numFtrs self.configDict['catFtrs'] = catFtrs self.configDict['allNumCols'] = allNumCols self.configDict['allCatCols'] = allCatCols self.configDict['allFtrs'] = numFtrs+catFtrs try: self.log.info('\n---------- Creating Incremental profiler models ----------') self.createIncProfiler(df, conf_json, allNumCols, numFtrs, allCatCols, textFtrs, missingValFtrs) self.log.info('\n--------- Incremental profiler models have been created ---------') except Exception as inst: self.log.info("Error in creating Incremental profiler models"+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) raise try: #mvt # if missingValFtrs: if self.incFill['num_fill'] == 'drop': df = df.dropna(axis = 0, subset=allNumCols) self.configDict['num_fill'] = 'drop' elif self.incFill['num_fill'] == 'zero': df[allNumCols] = df[allNumCols].fillna(value = 0.0) self.configDict['num_fill'] = 'zero' else: df = df.astype(object).where(df.notna(), None) df[allNumCols]= df[allNumCols].apply(lambda row: self.apply_river_model(row.to_dict(), self.incFill ['num_fill']), axis='columns') self.configDict['num_fill'] = {col:self.incFill['num_fill'].stats[col].get() for col in allNumCols} if self.incFill['cat_fill'] == 'drop': df = df.dropna(axis = 0, subset=allCatCols) self.configDict['cat_fill'] = 'drop' elif self.incFill['cat_fill'] == 'zero': df[allCatCols] = df[allCatCols].fillna(value = 0.0) self.configDict['cat_fill'] = 'zero' else: df = df.astype(object).where(df.notna(), None) df[allCatCols]= df[allCatCols].apply(lambda row: self.apply_river_model(row.to_dict(), self.incFill['cat_fill']), axis='columns') self.configDict['cat_fill'] = {col:self.incFill['cat_fill'].stats[col].get() for col in allCatCols} self.log.info('\nStatus:- |... 
Missing value treatment done') except Exception as inst: self.log.info("Error in Missing value treatment "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) raise try: #labelenc if self.incLabelMapping: df[targetFeature] = self.incLabelMapping.transform(df[targetFeature]) # self.configDict['labelMapping'] = self.le_dict except Exception as inst: self.log.info("Error in Label mapping "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) raise try: #catenc if self.incCatEncoder: self.log.info('\n--------- Converting Non Numerical Categorical Features to Numerical Features ---------') self.encTarget = targetFeature if problemType.lower() == 'regression': from sklearn.preprocessing import StandardScaler sc = StandardScaler() self.encTarget = 'scaledTarget' df['scaledTarget'] = sc.fit_transform(df[targetFeature].to_numpy().reshape(-1,1)) encCols = catFtrs.copy() encCols.append(self.encTarget) self.configDict['encCols'] = encCols self.configDict['encTarget'] = self.encTarget transformed_data = df[encCols].apply(lambda row: self.apply_enc(row.to_dict()), axis='columns') if targetFeature in transformed_data.columns: transformed_data.drop(targetFeature, inplace=True, axis = 1) if problemType.lower() == 'regression': df.drop('scaledTarget', inplace=True, axis = 1) df[catFtrs] = transformed_data # self.log.info('Status:- |... Target Encoding state is as follows: ') self.configDict['catEnc'] = [] if len(catFtrs) == 1: col = catFtrs[0] self.configDict['catEnc'].append({col:self.incCatEncoder['TargetAgg'].state.to_dict()}) else: for i, col in enumerate(catFtrs): if i==0: no = '' else: no = str(i) self.configDict['catEnc'].append({col:self.incCatEncoder['TransformerUnion']['TargetAgg'+no].state.to_dict()}) # print(self.incCatEncoder['TransformerUnion']['TargetAgg'].state) # self.log.info(self.incCatEncoder) self.log.info('Status:- |... Categorical to numeric feature conversion done: '+str(len(catFtrs))+' features converted') except Exception as inst: self.log.info("Error in categorical encoding "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) raise try: #scaler if self.incScaler: self.log.info("\n---------- Data Normalization has started ----------") self.incScaler = self.incScaler.partial_fit(df[numFtrs]) df[numFtrs] = self.incScaler.transform(df[numFtrs]) self.log.info( "---------- Normalization Done on Following features ----------") self.log.info(numFtrs) self.log.info('Status:- |... Normalization treatment done') except Exception as inst: self.log.info("Error in normalization "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) raise try: #outlierrem if self.incOutlierRem: self.log.info('\n---------- Performing outlier analysis ----------') df = df[df[numFtrs].apply(lambda x: False if self.apply_od_pipe(x.to_dict()) else True, axis=1)] self.log.info('\n <--- dataframe after outlier analysis --->') df.reset_index(drop=True, inplace=True) self.log.info(df.head(5)) self.log.info('Status:- |... 
Outlier treatment done') self.log.info('\n <--- shape of dataframe after outlier analysis --->') self.log.info(df.shape) except Exception as inst: self.log.info("Error in outlier treatment "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) raise #saveprofiler self.log.info('\n---------- Saving profiler models ----------') self.saveProfilerModels(deployLocation) self.log.info('<--- Profiler models saved at '+deployLocation+' --->') return df,targetFeature,missingValFtrs,numFtrs,catFtrs,self.le_dict,self.configDict,textFtrs,emptyFtrs,self.wordToNumericFeatures except Exception as inst: self.log.info("Error: dataProfiler failed "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def transformData(self, df, targetFeature, missingValFtrs,numFtrs, catFtrs, textFtrs): try: df = df.drop_duplicates(keep="first") df = df.reset_index(drop=True) df = df.replace('-', np.nan) df = df.replace('?', np.nan) text_mv_cols = list(set(missingValFtrs).intersection(set(textFtrs))) if len(text_mv_cols)>0: df[text_mv_cols] = df[text_mv_cols].fillna(value = 'NA') if 'num_fill' in self.configDict: if self.configDict['num_fill'] == 'drop': df = df.dropna(axis = 0, subset=self.allNumCols) elif self.configDict['num_fill'] == 'zero': df[self.allNumCols] = df[self.allNumCols].fillna(value = 0.0) else: for x in self.allNumCols: df[x] = df[x].fillna(value = self.configDict['num_fill'][x]) if 'cat_fill' in self.configDict: if self.configDict['cat_fill'] == 'drop': df = df.dropna(axis = 0, subset=self.allCatCols) elif self.configDict['cat_fill'] == 'zero': df[self.allCatCols] = df[self.allCatCols].fillna(value = 0.0) else: for x in self.allCatCols: df[x] = df[x].fillna(value = self.configDict['cat_fill'][x]) if self.incLabelMapping: df[targetFeature] = self.incLabelMapping.transform(df[targetFeature]) if self.incCatEncoder: transformed_data = df[catFtrs].apply(lambda row: self.apply_enc(row.to_dict(), isTrain=False), axis='columns') df[catFtrs] = transformed_data if self.incScaler: df[numFtrs] = self.incScaler.transform(df[numFtrs]) return df except Exception as inst: self.log.info("Error: DataProfiling transformation failed "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def checknumStr(self,dataframe,feature): try: dataframe[feature]=dataframe[feature].apply(lambda x: self.testStr(x)) return dataframe except: self.log.info("checknumStr failed") return dataframe #test whether the value is numeric /string def testStr(self,value): try: x=eval(value) return np.nan except: return value """ Missing values analysis Detects number of missing values in each column of dataframe """ def checksRows(self,dataframe,target_column,dataColumns): self.log.info( '\n----------- Checking Target Feature Empty Rows -----------') if self.targetColumn != '': numNa=dataframe.loc[(pd.isna(dataframe[self.targetColumn])),self.targetColumn].shape[0] self.log.info('------->No of Empty Rows in Target Fields: '+str(numNa)) if numNa >0: self.log.info('-------> Remove Empty Target Field Rows') dataframe = dataframe.dropna(axis=0, subset=[self.targetColumn]) self.log.info('-------> Remove Duplicate Rows') dataframe = 
dataframe.dropna(axis=0,how='all',subset=dataColumns) noofdplicaterows = dataframe.duplicated(keep='first').sum() dataframe = dataframe.drop_duplicates(keep="first") dataframe = dataframe.reset_index(drop=True) return dataframe,noofdplicaterows def apply_river_model(self, x, profModel): profModel.learn_one(x) return pd.Series(profModel.transform_one(x)) def apply_enc(self, x, isTrain=True): if isTrain: y = x[self.encTarget] self.incCatEncoder.learn_one(x, y) return pd.Series(self.incCatEncoder.transform_one(x)) def apply_od_pipe(self, x): score = self.incOutlierRem.score_one(x) is_anomaly = self.incOutlierRem.classify(score) self.incOutlierRem.learn_one(x) return is_anomaly #Convert Words To Number def s2n(self,value): try: x=eval(value) return x except: try: return w2n.word_to_num(value) except: return np.nan def convertWordToNumeric(self,dataframe,feature): try: dataframe[feature]=dataframe[feature].apply(lambda x: self.s2n(x)) return dataframe except Exception as inst: self.log.info("convertWordToNumeric Failed ===>"+str(inst)) return dataframe #test whether the value is numeric /string def testNum(self,value): try: x=eval(value) return x except: return np.nan ##check for numeric values in string column def checkNumeric(self,dataframe,feature): try: dataframe[feature]=dataframe[feature].apply(lambda x: self.testNum(x)) return dataframe except Exception as inst: self.log.info("checkNumeric Failed ===>"+str(inst)) return dataframe def smartFilter(self,feature,df,numericRatio): try: distinctCount = len(df[feature].unique()) numOfRows = df.shape[0] tempDataFrame=df.copy(deep=True) if(distinctCount != 1): self.log.info('-------> Feature :'+str(feature)) testDf = self.checkNumeric(tempDataFrame,feature) tempDf = testDf[feature] tempDf = tempDf.dropna() numberOfNonNullVals = tempDf.count() if(numberOfNonNullVals > int(numOfRows * numericRatio)): tempDataFrame=df.copy(deep=True) testDf = self.convertWordToNumeric(tempDataFrame,feature) tempDf = testDf[feature] tempDf = tempDf.dropna() self.log.info('----------> Numeric Status :Yes') return testDf,True else: #Wasnt't a numerical feature self.log.info('----------> Numeric Status :No') #numDf = self.checknumStr(df,feature) return df,False else: self.log.info( '\n---> No Numerics found in :' +str(feature)) return df,False except: self.log.info( '\n---> No Numerics found in :'+str(feature)) return df,False def dataFramecolType(self,dataFrame): dataFDtypes=[] try: dataColumns=list(dataFrame.columns) for i in dataColumns: dataType=dataFrame[i].dtypes dataFDtypes.append(tuple([i,str(dataType)])) return dataFDtypes except: self.log.info("error in dataFramecolyType") return dataFDtypes
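The profiler above streams each row through river models (apply_river_model / apply_enc) so missing-value statistics and target encodings keep updating as new batches arrive. Below is a minimal, self-contained sketch of that row-wise imputation pattern using river's running Mean; the toy column name and values are illustrative only and are not taken from the repository.

import pandas as pd
from river import stats

# Toy frame with gaps in one numeric column (illustrative data).
df = pd.DataFrame({"age": [34.0, None, 29.0, None, 41.0]})
running = {col: stats.Mean() for col in df.columns}

def fill_row(row):
    out = {}
    for col, val in row.items():
        if pd.notna(val):
            running[col].update(val)        # learn from observed values
            out[col] = val
        else:
            out[col] = running[col].get()   # impute with the running mean seen so far
    return pd.Series(out)

df = df.apply(fill_row, axis="columns")
print(df)
print({col: s.get() for col, s in running.items()})   # the kind of state persisted in configDict['num_fill']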
riverML.py
import logging import pickle import os import sys import pandas as pd from river import stream from river.linear_model import LogisticRegression, SoftmaxRegression, LinearRegression from river.tree import ExtremelyFastDecisionTreeClassifier, HoeffdingAdaptiveTreeRegressor # from river.ensemble import AdaptiveRandomForestRegressor, AdaptiveRandomForestClassifier from river.neighbors import KNNClassifier, KNNRegressor from river.multiclass import OneVsRestClassifier from river.optim import SGD, Adam, AdaDelta, NesterovMomentum, RMSProp # from river.optim.losses import CrossEntropy, Log, MultiClassLoss, Poisson, RegressionLoss, BinaryLoss, Huber # from river.optim.initializers import Normal class riverML(object): def __init__(self): self.algoDict={'Online Logistic Regression':LogisticRegression, 'Online Softmax Regression':SoftmaxRegression, 'Online Decision Tree Classifier':ExtremelyFastDecisionTreeClassifier, 'Online KNN Classifier':KNNClassifier,'Online Linear Regression':LinearRegression, 'Online Decision Tree Regressor':HoeffdingAdaptiveTreeRegressor, 'Online KNN Regressor':KNNRegressor} self.optDict={'sgd': SGD, 'adam':Adam, 'adadelta':AdaDelta, 'nesterovmomentum':NesterovMomentum, 'rmsprop':RMSProp} self.log = logging.getLogger('eion') def getPrediction(self, model,X): testStream = stream.iter_pandas(X) preds = [] for (xi,yi) in testStream: pred = model.predict_one(xi) preds.append(pred) return pd.DataFrame(preds) def startLearn(self,problemType,algoName,params,xtrain,ytrain,noOfClasses=None): try: model = self.algoDict[algoName] params = self.parseParams(params, algoName) if problemType == 'classification': if noOfClasses>2: model = OneVsRestClassifier(classifier=model(**params)) else: model = model(**params) else: model = model(**params) trainStream = stream.iter_pandas(xtrain, ytrain) #head start for i, (xi, yi) in enumerate(trainStream): if i>100: break if yi!=None: model.learn_one(xi, yi) trainPredictedData = [] trainStream = stream.iter_pandas(xtrain, ytrain) for i, (xi, yi) in enumerate(trainStream): if yi!=None: trainPredictedData.append(model.predict_one(xi)) model.learn_one(xi, yi) trainPredictedData = pd.DataFrame(trainPredictedData) return algoName, params, model, trainPredictedData except Exception as inst: self.log.info( '\n-----> '+algoName+' failed!!!.'+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def parseParams(self, params, algoName): try: from learner.parameters import parametersDefine paramsObj = parametersDefine() paramDict =paramsObj.paramDefine(params,method=None) paramDict = {k:v[0] for k,v in paramDict.items()} if algoName=='Online Logistic Regression' or algoName=='Online Softmax Regression' or algoName=='Online Linear Regression': opt = self.optDict[paramDict.pop('optimizer').lower()] lr = float(paramDict.pop('optimizer_lr')) paramDict['optimizer'] = opt(lr) return paramDict except Exception as inst: self.log.info( '\n-----> Parameter parsing failed!!!.'+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
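startLearn() above trains river estimators one record at a time with a predict-then-learn (progressive validation) loop over stream.iter_pandas. A minimal, self-contained sketch of that loop is shown below; the toy features, target and learning rate are illustrative and not taken from the repository.

import pandas as pd
from river import stream, optim
from river.linear_model import LogisticRegression

# Illustrative data: two numeric features and a binary target.
X = pd.DataFrame({"f1": [0.1, 0.9, 0.2, 0.8, 0.15, 0.85],
                  "f2": [1.0, 0.0, 1.0, 0.0, 1.0, 0.0]})
y = pd.Series([0, 1, 0, 1, 0, 1], name="target")

model = LogisticRegression(optimizer=optim.SGD(0.05))

preds = []
for xi, yi in stream.iter_pandas(X, y):
    preds.append(model.predict_one(xi))   # predict before seeing the label
    model.learn_one(xi, yi)               # then update the model with it
print(preds)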
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
ensemble_bagging.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import time import os import sys import numpy as np import pandas as pd from sklearn import model_selection from sklearn.model_selection import train_test_split, KFold, cross_val_score from sklearn.model_selection import KFold #Classification metrics lib import logging import warnings warnings.filterwarnings('always') # "error", "ignore", "always", "default", "module" or "once" from learner.aion_matrix import aion_matrix from sklearn.preprocessing import binarize class ensemble_bagging(): def __init__(self,ensemble_params,scoreParam,MakeFP0,MakeFN0): self.ensemble_params = ensemble_params self.scoreParam=scoreParam self.MakeFP0 = MakeFP0 self.MakeFN0 = MakeFN0 self.log = logging.getLogger('eion') def add_alg2dict(self,k,v): b_dict={} b_dict[k]=v return b_dict def getSelected_algs_params(self,problemType,ensembleType,ensembleConfig): from learner.parameters import parametersDefine paramObj=parametersDefine() ensClass_algs_params={} algs_status={} for key,val in ensembleConfig.items(): for s,p in val.items(): if (s == "enable" and p == "True"): params = val['param'] params_eval = paramObj.paramDefine(params,None) params_eval = {param_key: param_value[0] for param_key, param_value in params_eval.items()} ensClass_algs_params[key]=params_eval else: pass return ensClass_algs_params def listEnsembleClassBaggingAlgs(self,ensClass_algs_params): from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import BaggingClassifier, ExtraTreesClassifier, RandomForestClassifier ensembleBaggingClassList=list() for key,val in ensClass_algs_params.items(): if (key == 'Logistic Regression'): lr=LogisticRegression() lr=lr.set_params(**val) ensembleBaggingClassList.append(lr) elif (key == 'Support Vector Machine'): svm=SVC() svm=svm.set_params(**val) ensembleBaggingClassList.append(svm) elif (key == 'Naive Bayes'): nb=GaussianNB() nb=nb.set_params(**val) ensembleBaggingClassList.append(nb) elif (key == 'K Nearest Neighbors'): knn=KNeighborsClassifier() knn=knn.set_params(**val) ensembleBaggingClassList.append(knn) elif (key == 'Decision Tree'): dt=DecisionTreeClassifier() dt=dt.set_params(**val) ensembleBaggingClassList.append(dt) elif (key == 'Random Forest'): rf=RandomForestClassifier() rf=rf.set_params(**val) ensembleBaggingClassList.append(rf) else: pass return ensembleBaggingClassList def listEnsembleRegBaggingAlgs(self,ensReg_algs_params): from sklearn.linear_model import Ridge from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor from sklearn.tree import DecisionTreeRegressor ensembleBaggingRegList=list() for key,val in ensReg_algs_params.items(): if (key == 'Linear Regression'): lir=LinearRegression() lir=lir.set_params(**val) ensembleBaggingRegList.append(lir) elif (key == 'Decision Tree'): dtr=DecisionTreeRegressor() dtr=dtr.set_params(**val) 
ensembleBaggingRegList.append(dtr) elif (key == 'Ridge'): ridge=Ridge() ridge=ridge.set_params(**val) ensembleBaggingRegList.append(ridge) else: ensembleBaggingRegList=[] return ensembleBaggingRegList def ensemble_bagging_classifier(self,X_train,y_train, X_test, y_test): ## New changes from sklearn.ensemble import BaggingClassifier, ExtraTreesClassifier, RandomForestClassifier ensemble_method = "Bagging_classifier" problemType='classification' ensembleType='bagging' model_dict=self.ensemble_params ensClass_algs_params = self.getSelected_algs_params(problemType,ensembleType,model_dict) ensembleBaggingList = self.listEnsembleClassBaggingAlgs(ensClass_algs_params) # clf_array = model_list clf_array=ensembleBaggingList # no. of base classifier num_trees = len(clf_array) # max_samples=float(max_samples) n_estimators = num_trees # random_state=seed bagging_mean={} bagging_std={} accuracy_basealgs_train={} accuracy_basealgs_test={} blable="" accuracy_score_test=0 kfold = model_selection.KFold(n_splits=10, random_state=None) bestScore=-0xFFFF scoredetails = '' threshold = -1 bestthreshold = -1 precisionscore =-1 bestprecisionscore=-1 recallscore = -1 bestrecallscore=-1 objClf = aion_matrix() if (ensemble_method == "Bagging_classifier"): #bagging ensemble of base classifier .e.g. KNeighborsClassifier base estimators, each built on random subsets of 40% of the samples and 50% of the features. for clf in clf_array: self.log.info('-----------> Ensemble Algorithm '+str(clf.__class__.__name__)) clf.fit(X_train, y_train) bagging_clf = BaggingClassifier(clf,n_estimators = num_trees, random_state=10) bagging_clf.fit(X_train, y_train) bagging_scores = cross_val_score(bagging_clf, X_train, y_train, cv=kfold,n_jobs=-1) #bagging_ensemble_t=bagging_clf.fit(X_train, y_train) if not X_test.empty: bag_predict=bagging_clf.predict(X_test) accuracy_score_test = objClf.get_score(self.scoreParam,y_test,bag_predict) else: accuracy_score_test = bagging_scores MakeFP0 = False MakeFN0 = False if self.MakeFP0: self.log.info('-------- Ensemble: Calculate Threshold for FP Start-------') startRange = 0.0 endRange = 1.0 stepsize = 0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = objClf.check_threshold(bagging_clf,X_train,y_train,threshold_range,'FP','') MakeFP0 = True self.log.info('-------- Calculate Threshold for FP End-------') if self.MakeFN0: self.log.info('-------- Ensemble: Calculate Threshold for FN Start-------') startRange = 1.0 endRange = 0.0 stepsize = -0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = objClf.check_threshold(bagging_clf,X_train,y_train,threshold_range,'FN','') MakeFN0 = True self.log.info('-------- Calculate Threshold for FN End-------') if threshold != -1: if not X_test.empty: predictedData = bagging_clf.predict_proba(X_test) predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold) #bug 12437 accuracy_score_test = objClf.get_score(self.scoreParam,y_test,predictedData) status,bscore,bthres,brscore,bpscore = objClf.getBestModel(MakeFP0,MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,accuracy_score_test,bestScore) if status: bestScore =bscore bestModel =bagging_clf.__class__.__name__ bestEstimator=bagging_clf bestthreshold = bthres bestBaseModel = clf.__class__.__name__ bestrecallscore = brscore bestprecisionscore = bpscore else: pass best_alg_name=bestEstimator.__class__.__name__ self.log.info('-----------> Best Bagging 
Classifier Model '+str(bestBaseModel)) self.log.info('-----------> Best Score '+str(bestScore)) # self.log.info('-----------> Threshold '+str(bestthreshold)) #bug 12438 if bestthreshold != -1: if not X_test.empty: predictedData_test = bestEstimator.predict_proba(X_test) predictedData_test = binarize(predictedData_test[:,1].reshape(-1, 1),threshold=bestthreshold) #bug 12437 predictedData_train = bestEstimator.predict_proba(X_train) predictedData_train = binarize(predictedData_train[:,1].reshape(-1, 1),threshold=bestthreshold) #bug 12437 else: if not X_test.empty: predictedData_test = bestEstimator.predict(X_test) predictedData_train = bestEstimator.predict(X_train) return bestEstimator,bestEstimator.get_params(),bestScore,best_alg_name,bestthreshold,bestprecisionscore,bestrecallscore def ensemble_bagging__regressor(self,X_train,y_train, X_test, y_test): from sklearn.ensemble import BaggingRegressor ensemble_method='Bagging_regressor' problemType='regression' ensembleType='bagging' model_dict=self.ensemble_params ensReg_algs_params = self.getSelected_algs_params(problemType,ensembleType,model_dict) ensembleBaggingList = self.listEnsembleRegBaggingAlgs(ensReg_algs_params) scoredetails = '' aion_matrixobj = aion_matrix() reg_array = ensembleBaggingList num_trees = len(reg_array) #self.log.info(num_trees) # max_samples=float(max_samples) n_estimators = num_trees r_state=10 bestModel='' bestParams={} bestScore=-sys.float_info.max #extension of bugfix 11656 objClf = aion_matrix() for reg in reg_array: self.log.info('-----------> Ensemble Algorithm '+str(reg.__class__.__name__)) nmodel=reg.fit(X_train, y_train) model = reg.__class__.__name__ estimator = BaggingRegressor(base_estimator=reg, random_state=r_state) bagging_ensemble_t=estimator.fit(X_train, y_train) predictedData = estimator.predict(X_test) score = objClf.get_score(self.scoreParam,y_test,predictedData) if self.scoreParam == "r2": if score > bestScore: bestScore =score bestModel =model bestEstimator=estimator else: if abs(score) < bestScore or bestScore == -sys.float_info.max: #extension of bugfix 11656 bestScore =abs(score) bestModel =model bestEstimator=estimator best_alg_name=bestEstimator.__class__.__name__ self.log.info('-----------> Best Ensemble Algorithm '+str(bestModel)) return bestEstimator,bestEstimator.get_params(),bestScore,best_alg_name
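ensemble_bagging_classifier() above wraps each configured base estimator in a scikit-learn BaggingClassifier, cross-validates it, and keeps the best-scoring one. The following is a small, self-contained sketch of that core step on synthetic data; the base estimator, sample counts and random seeds are illustrative only.

from sklearn.datasets import make_classification
from sklearn.ensemble import BaggingClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.neighbors import KNeighborsClassifier

X, y = make_classification(n_samples=300, n_features=8, random_state=10)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=10)

# Bag one candidate base estimator, as the loop over clf_array does for each algorithm.
bagging_clf = BaggingClassifier(KNeighborsClassifier(), n_estimators=5, random_state=10)
cv_scores = cross_val_score(bagging_clf, X_train, y_train, cv=KFold(n_splits=10), n_jobs=-1)
bagging_clf.fit(X_train, y_train)
print(cv_scores.mean(), accuracy_score(y_test, bagging_clf.predict(X_test)))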
ensemble_voting.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import numpy as np import logging import warnings from sklearn.ensemble import VotingClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import Ridge from sklearn.preprocessing import binarize from sklearn.ensemble import VotingRegressor from sklearn.svm import SVC from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor from sklearn.tree import DecisionTreeRegressor from learner.aion_matrix import aion_matrix warnings.filterwarnings('always') class ensemble_voting(): def __init__(self,ensemble_params,scoreParam): self.ensemble_params = ensemble_params self.scoreParam=scoreParam self.final_estimator_r='' self.final_estimator_c='' self.log = logging.getLogger('eion') ''' Read the aion config "Ensemble-Voting", parse the algorithm and associated params based on enable or True status.Not used now ''' def getSelected_algs_params(self,problemType,ensembleType,ensembleConfig): from learner.parameters import parametersDefine paramObj=parametersDefine() ensClass_algs_params={} # algs_status={} for key,val in ensembleConfig.items(): for s,p in val.items(): if (s == "enable" and p == "True"): params = val['param'] params_eval = paramObj.paramDefine(params,None) params_eval = {param_key: param_value[0] for param_key, param_value in params_eval.items()} ensClass_algs_params[key]=params_eval else: pass return ensClass_algs_params ''' To make array of voting algorithm based on user config list. Not used now, in future if needed similar line with bagging ensemble, please use this. ''' def listEnsembleClassVotingAlgs(self,ensClass_algs_params): ensembleVotingClassList=list() for key,val in ensClass_algs_params.items(): if (key == 'Logistic Regression'): lr=LogisticRegression() lr=lr.set_params(**val) ensembleVotingClassList.append(lr) elif (key == 'Support Vector Machine'): svm=SVC() svm=svm.set_params(**val) ensembleVotingClassList.append(svm) elif (key == 'Naive Bayes'): nb=GaussianNB() nb=nb.set_params(**val) ensembleVotingClassList.append(nb) elif (key == 'K Nearest Neighbors'): knn=KNeighborsClassifier() knn=knn.set_params(**val) ensembleVotingClassList.append(knn) elif (key == 'Decision Tree'): dt=DecisionTreeClassifier() dt=dt.set_params(**val) ensembleVotingClassList.append(dt) elif (key == 'Random Forest'): rf=RandomForestClassifier() rf=rf.set_params(**val) ensembleVotingClassList.append(rf) else: ## Algorithm not found in config, so forming empty alg list. If needs, make list with default alg. ensembleVotingClassList=[] pass return ensembleVotingClassList ''' To make array of voting regression algorithm based on user config list. Not used now, in future if needed similar line with bagging ensemble, please use this. 
''' def listEnsembleRegVotingAlgs(self,ensReg_algs_params): ensembleVotingRegList=list() for key,val in ensReg_algs_params.items(): if (key == 'Linear Regression'): lir=LinearRegression() lir=lir.set_params(**val) ensembleVotingRegList.append(lir) elif (key == 'Decision Tree'): dtr=DecisionTreeRegressor() dtr=dtr.set_params(**val) ensembleVotingRegList.append(dtr) elif (key == 'Ridge'): ridge=Ridge() ridge=ridge.set_params(**val) ensembleVotingRegList.append(ridge) else: ## Algorithm not found in config, so forming empty alg list. If needs, make list with default alg. ensembleVotingRegList=[] return ensembleVotingRegList def ensemble_voting_classifier(self,X_train,y_train, X_test, y_test,MakeFP0,MakeFN0,modelList): #bug 12437 status='ERROR' model=None estimator=None score=None params=None threshold = -1 precisionscore =-1 recallscore = -1 objClf = aion_matrix() try: lr = LogisticRegression(solver='lbfgs',random_state=1,max_iter=200) rf = RandomForestClassifier(random_state=1) gnb = GaussianNB() svc = SVC(probability=True) #Need to keep probability=True, because cross_val_score,predict_proba fn calls knn=KNeighborsClassifier(n_neighbors=5) base_estimators = [] if 'Logistic Regression' in modelList: base_estimators.append(('LogisticRegression', lr)) self.log.info('-------- Ensemble: Logistic Regression-------') if 'Random Forest' in modelList: base_estimators.append(('RandomForestClassifier', rf)) self.log.info('-------- Ensemble: Random Forest-------') if 'Naive Bayes' in modelList: base_estimators.append(('GaussianNB', gnb)) self.log.info('-------- Ensemble: Naive Bayes-------') if 'Support Vector Machine' in modelList: self.log.info('-------- Ensemble: Support Vector Machine-------') base_estimators.append(('SVC', svc)) if 'K Nearest Neighbors' in modelList: base_estimators.append(('KNeighborsClassifier', knn)) self.log.info('-------- Ensemble: K Nearest Neighbors-------') if len(base_estimators) == 0: self.log.info('-------- Ensemble Voting is only supported for Logistic Regression, Random Forest Classifier, Naive Bayes, SVM and KNN -------') status = "UNSUPPORTED" return status, estimator,params,score,model,threshold,precisionscore,recallscore eclf1 = VotingClassifier(base_estimators, voting='soft') eclf1.fit(X_train, y_train) y_predict = eclf1.predict(X_test) score = objClf.get_score(self.scoreParam,y_test,y_predict) self.log.info('-------- Ensemble (VoteClassifier) Soft Score:'+str(score)) if MakeFP0: self.log.info('-------- Ensemble: Calculate Threshold for FP Start-------') startRange = 0.0 endRange = 1.0 stepsize = 0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = objClf.check_threshold(eclf1,X_train,y_train,threshold_range,'FP','') self.log.info('-------- Calculate Threshold for FP End-------') elif MakeFN0: self.log.info('-------- Ensemble: Calculate Threshold for FN Start-------') startRange = 1.0 endRange = 0.0 stepsize = -0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = objClf.check_threshold(eclf1,X_train,y_train,threshold_range,'FN','') self.log.info('-------- Calculate Threshold for FN End-------') if threshold != -1: predictedData = eclf1.predict_proba(X_test) predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold) #bug 12437 score = objClf.get_score(self.scoreParam,y_test,predictedData) status = 'SUCCESS' model =eclf1.__class__.__name__ estimator=eclf1 params = estimator.get_params() #bug 12437 - Removed ensemble hard voting as predict_proba in 
the later stages will break except Exception as Inst: #bug 12437 self.log.info('--------- Error in Ensemble Voting ---------\n') self.log.info(str(Inst)) return status,estimator,params,score,model,threshold,precisionscore,recallscore def ensemble_voting__regressor(self,X_train,y_train, X_test, y_test,modelList): scoredetails = '' vr_predict=None vr_model=None try: lr = LinearRegression() rfr = RandomForestRegressor(n_estimators=10, random_state=1) dtr=DecisionTreeRegressor() base_estimators = [] if 'Linear Regression' in modelList: base_estimators.append(('LinearRegression', lr)) if 'Decision Tree' in modelList: base_estimators.append(('DecisionTreeRegressor', dtr)) if 'Random Forest' in modelList: base_estimators.append(('RandomForestRegressor', rfr)) if len(base_estimators) == 0: base_estimators = [('LinearRegression', lr), ('RandomForestRegressor', rfr),('DecisionTreeRegressor', dtr)] voting_reg = VotingRegressor(base_estimators) vr_model=voting_reg.fit(X_train,y_train) vr_predict=voting_reg.predict(X_test) best_vr_alg=voting_reg.__class__.__name__ self.log.info('-----------> Voting regression Model '+str(best_vr_alg)) except Exception as e: self.log.info("voting regression Exception info: \n") self.log.info(e) aion_matrixobj = aion_matrix() score = aion_matrixobj.get_score(self.scoreParam,y_test,vr_predict) return voting_reg,voting_reg.get_params(),score,best_vr_alg
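ensemble_voting_classifier() above builds a soft VotingClassifier from whichever of LR, RF, NB, SVM and KNN appear in modelList. A compact, self-contained sketch of that construction on synthetic data follows; the subset of estimators and the random seeds are illustrative only.

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB

X, y = make_classification(n_samples=300, n_features=8, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)

base_estimators = [
    ("LogisticRegression", LogisticRegression(solver="lbfgs", max_iter=200, random_state=1)),
    ("RandomForestClassifier", RandomForestClassifier(random_state=1)),
    ("GaussianNB", GaussianNB()),
]
eclf = VotingClassifier(base_estimators, voting="soft")   # soft voting, as in the method above
eclf.fit(X_train, y_train)
print(accuracy_score(y_test, eclf.predict(X_test)))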
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
ensemble_stacking.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import numpy as np #Classification metrics lib import logging import warnings from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.preprocessing import binarize from sklearn.svm import SVC from sklearn.ensemble import StackingClassifier from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import StackingRegressor from sklearn.svm import LinearSVR from sklearn.linear_model import RidgeCV from sklearn.linear_model import LassoCV from learner.aion_matrix import aion_matrix warnings.filterwarnings('always') # "error", "ignore", "always", "default", "module" or "once" class ensemble_stacking(): def __init__(self,ensemble_params,scoreParam): self.ensemble_params = ensemble_params self.scoreParam=scoreParam self.final_estimator_r='' self.final_estimator_c='' self.log = logging.getLogger('eion') ## Read the aion config "Ensemble-Stacking", parse the algorithm and associated params based on enable or True status. def getSelected_algs_params(self,problemType,ensembleType,ensembleConfig): from learner.parameters import parametersDefine paramObj=parametersDefine() ensClass_algs_params={} # algs_status={} for key,val in ensembleConfig.items(): for s,p in val.items(): if (s == "enable" and p == "True"): params = val['param'] params_eval = paramObj.paramDefine(params,None) params_eval = {param_key: param_value[0] for param_key, param_value in params_eval.items()} ensClass_algs_params[key]=params_eval else: pass return ensClass_algs_params ## To make array of stacking algorithm based on user config list. Not used now, in future if needed similar line with bagging ensemble, please use this. def listEnsembleClassStackingAlgs(self,ensClass_algs_params): ensembleBaggingClassList=list() for key,val in ensClass_algs_params.items(): # print(key) if (key == 'Logistic Regression'): lr=LogisticRegression() lr=lr.set_params(**val) ensembleBaggingClassList.append(lr) elif (key == 'Support Vector Machine'): svm=SVC() svm=svm.set_params(**val) ensembleBaggingClassList.append(svm) elif (key == 'Naive Bayes'): nb=GaussianNB() nb=nb.set_params(**val) ensembleBaggingClassList.append(nb) elif (key == 'K Nearest Neighbors'): knn=KNeighborsClassifier() knn=knn.set_params(**val) ensembleBaggingClassList.append(knn) elif (key == 'Decision Tree'): dt=DecisionTreeClassifier() dt=dt.set_params(**val) ensembleBaggingClassList.append(dt) elif (key == 'Random Forest'): rf=RandomForestClassifier() rf=rf.set_params(**val) ensembleBaggingClassList.append(rf) else: ensembleBaggingClassList=[] pass return ensembleBaggingClassList ## To make array of stacking regression algorithm based on user config list. Not used now, in future if needed similar line with bagging ensemble, please use this. 
def listEnsembleRegStackingAlgs(self,ensReg_algs_params): ensembleBaggingRegList=list() for key,val in ensReg_algs_params.items(): if (key == 'LinearSVR'): lir=LinearSVR() lir=lir.set_params(**val) ensembleBaggingRegList.append(lir) elif (key == 'LinearRegression'): lr=LinearRegression() lr=lr.set_params(**val) ensembleBaggingRegList.append(lr) elif (key == 'LassoCV'): lcv=LassoCV() lcv=lcv.set_params(**val) ensembleBaggingRegList.append(lcv) elif (key == 'RandomForestRegressor'): rfr=RandomForestRegressor() rfr=rfr.set_params(**val) ensembleBaggingRegList.append(rfr) elif (key == 'RidgeCV'): ridge=RidgeCV() ridge=ridge.set_params(**val) ensembleBaggingRegList.append(ridge) else: ## NO algorithms found in configuration settings, instead of sending empty array,we can add any one of algorithms. ensembleBaggingRegList=[] return ensembleBaggingRegList def extract_params(self,dict): self.dict=dict for k,v in self.dict.items(): return k,v def stacking_params(self): for k,v in self.ensemble_params.items(): try: if (k == "max_features_percentage"): max_features_percentage=float(v) elif (k == "max_samples"): max_samples=float(v) elif (k == "seed"): seed=int(v) elif (k == "final_estimator_stack_c"): final_estimator_c=str(v) elif (k == "final_estimator_stack_r"): final_estimator_r=str(v) else: self.log.info("Invalid Param in ensemble advanced configuration.\n") except Exception as e: self.log.info("\n Ensemble config param parsing error"+str(e)) continue return final_estimator_c,final_estimator_r,seed,max_samples,max_features_percentage def ensemble_stacking_classifier(self,X_train,y_train, X_test, y_test,MakeFP0,MakeFN0,modelList): final_estimator_c,final_estimator_r,seed,max_samples,max_features_percentage= self.stacking_params() final_estimator_c="" final_estimator=final_estimator_c scoredetails='' lr = LogisticRegression(solver='lbfgs',random_state=1,max_iter=200) rf = RandomForestClassifier(random_state=2) gnb = GaussianNB() svc = SVC(probability=True) #Need to keep probability=True, because of cross_val_score,predict_proba fn calls knn=KNeighborsClassifier(n_neighbors=5) try: if (final_estimator == 'LogisticRegression'): final_estimator_a=lr elif (final_estimator == 'RandomForestClassifier'): final_estimator_a=rf elif (final_estimator == 'GaussianNB'): final_estimator_a=gnb elif (final_estimator == 'SVC'): final_estimator_a=svc elif (final_estimator == 'KNeighborsClassifier'): final_estimator_a=knn else: final_estimator_a=lr except Exception as e: final_estimator_a=lr self.log.info("Given stacking regression final estimator algorithm issue, using default one (LogisticRegression) as final_estimator now.\n") self.log.info(e) #stacking estimators base_estimators = [] if 'Logistic Regression' in modelList: base_estimators.append(('LogisticRegression', lr)) if 'Random Forest' in modelList: base_estimators.append(('RandomForestClassifier', rf)) if 'Naive Bayes' in modelList: base_estimators.append(('GaussianNB', gnb)) if 'Support Vector Machine' in modelList: base_estimators.append(('SVC', svc)) if 'K Nearest Neighbors' in modelList: base_estimators.append(('KNeighborsClassifier', knn)) if len(base_estimators) == 0: base_estimators = [('LogisticRegression', lr),('RandomForestClassifier', rf),('GaussianNB', gnb),('SVC', svc),('KNeighborsClassifier', knn)] stacking_c = StackingClassifier(estimators=base_estimators, final_estimator=final_estimator_a) stacking_c.fit(X_train, y_train) y_predict=stacking_c.predict(X_test) objClf = aion_matrix() accuracy_score_test = 
objClf.get_score(self.scoreParam,y_test,y_predict) MakeFP0 = False MakeFN0 = False threshold = -1 recallscore = -1 precisionscore =-1 if MakeFP0: self.log.info('-------- Ensemble: Calculate Threshold for FP Start-------') startRange = 0.0 endRange = 1.0 stepsize = 0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = objClf.check_threshold(stacking_c,X_train,y_train,threshold_range,'FP','') MakeFP0 = True self.log.info('-------- Calculate Threshold for FP End-------') elif MakeFN0: self.log.info('-------- Ensemble: Calculate Threshold for FN Start-------') startRange = 1.0 endRange = 0.0 stepsize = -0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = objClf.check_threshold(stacking_c,X_train,y_train,threshold_range,'FN','') MakeFN0 = True self.log.info('-------- Calculate Threshold for FN End-------') if threshold != -1: predictedData = stacking_c.predict_proba(X_test) predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold) #bug 12437 accuracy_score_test = objClf.get_score(self.scoreParam,y_test,predictedData) best_alg_stacking=stacking_c.__class__.__name__ self.log.info('-----------> Best Stacking Classifier Model '+str(best_alg_stacking)) self.log.info('-----------> Best Score '+str(accuracy_score_test)) return stacking_c,stacking_c.get_params(),accuracy_score_test,best_alg_stacking,threshold,precisionscore,recallscore def ensemble_stacking__regressor(self,X_train,y_train, X_test, y_test,modelList): final_estimator_c,final_estimator_r,seed,max_samples,max_features_percentage= self.stacking_params() final_estimator=final_estimator_r final_estimator_a=None scoredetails='' lr=LinearRegression() rcv=RidgeCV() svr=LinearSVR() lcv=LassoCV() rf=RandomForestRegressor(random_state=42) try: if (final_estimator == 'LinearRegression'): final_estimator_a=lr if (final_estimator == 'RidgeCV'): final_estimator_a=rcv elif (final_estimator == 'LinearSVR'): final_estimator_a=svr elif (final_estimator == 'LassoCV'): final_estimator_a=lcv elif (final_estimator == 'RandomForestRegressor'): final_estimator_a=rf else: #default is RidgeCV final_estimator_a=rcv except Exception as e: self.log.info("stacking regression Exception info: \n") self.log.info(e) final_estimator_a=rcv base_estimators = [] if 'Linear Regression' in modelList: base_estimators.append(('LinearRegression', lr)) if 'Ridge' in modelList: base_estimators.append(('RidgeCV', rcv)) if 'LinearSVR' in modelList: base_estimators.append(('LinearSVR', svr)) if 'Lasso' in modelList: base_estimators.append(('LassoCV', lcv)) if 'Random Forest' in modelList: base_estimators.append(('RandomForestRegressor', rf)) if len(base_estimators) == 0: base_estimators = [('LinearRegression', lr),('RidgeCV', rcv),('LinearSVR', svr),('LassoCV', lcv),('RandomForestRegressor', rf)] self.log.info("Stacking Base Alogs :"+str(base_estimators)) self.log.info("Final Estimator :"+final_estimator) stacking_regressor = StackingRegressor(estimators=base_estimators,final_estimator=final_estimator_a) stacking_r_model=stacking_regressor.fit(X_train, y_train) stacking_rpredict=stacking_regressor.predict(X_test) best_stacking_alg=stacking_regressor.__class__.__name__ #Accuracy accuracy_score_best=stacking_regressor.score(X_test, y_test) aion_matrixobj = aion_matrix() score = aion_matrixobj.get_score(self.scoreParam,y_test,stacking_rpredict) return stacking_regressor,stacking_regressor.get_params(),score,best_stacking_alg
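ensemble_stacking_classifier() above stacks the selected base learners and feeds their predictions to a final estimator (LogisticRegression when nothing else is configured). Below is a short, self-contained sketch of the same construction on synthetic data; the chosen base learners and seeds are illustrative only.

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, StackingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB

X, y = make_classification(n_samples=300, n_features=8, random_state=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=2)

base_estimators = [
    ("RandomForestClassifier", RandomForestClassifier(random_state=2)),
    ("GaussianNB", GaussianNB()),
]
stack = StackingClassifier(estimators=base_estimators,
                           final_estimator=LogisticRegression(solver="lbfgs", max_iter=200))
stack.fit(X_train, y_train)
print(accuracy_score(y_test, stack.predict(X_test)))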
__init__.py
from .genetic_optimization import GeneticOptimizationCV
genetic_optimization.py
import warnings warnings.filterwarnings('ignore') from genetic_selection import GeneticSelectionCV # Feature selection via a genetic algorithm: the caller's n_iter is used as the number of GA generations def GeneticOptimizationCV(model,genetic_params,n_iter,scoring,cv): n_generations = n_iter estimator = model selector = GeneticSelectionCV(estimator,cv=cv,**genetic_params,n_generations=n_generations,scoring=scoring) return selector
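GeneticOptimizationCV() above is a thin wrapper that maps n_iter onto GeneticSelectionCV's n_generations. The sketch below shows one plausible call on synthetic data; the genetic_params keys (n_population, crossover_proba, mutation_proba) and the support_ attribute follow the genetic_selection (sklearn-genetic) package as commonly documented and should be treated as assumptions if your installed version differs.

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from genetic_selection import GeneticSelectionCV

X, y = make_classification(n_samples=200, n_features=12, n_informative=4, random_state=0)

# Assumed parameter names for the GA search; tune to taste.
genetic_params = {"n_population": 20, "crossover_proba": 0.5, "mutation_proba": 0.2}
selector = GeneticSelectionCV(LogisticRegression(max_iter=500), cv=3,
                              n_generations=5, scoring="accuracy", **genetic_params)
selector = selector.fit(X, y)
print(selector.support_)   # boolean mask of the selected features (assumed attribute)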
pipelines.py
import itertools import logging from typing import Optional, Dict, Union from nltk import sent_tokenize import torch from transformers import( AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel, PreTrainedTokenizer, ) logger = logging.getLogger(__name__) class QGPipeline: """Poor man's QG pipeline""" def __init__( self, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, ans_model: PreTrainedModel, ans_tokenizer: PreTrainedTokenizer, qg_format: str, use_cuda: bool ): self.model = model self.tokenizer = tokenizer self.ans_model = ans_model self.ans_tokenizer = ans_tokenizer self.qg_format = qg_format self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu" self.model.to(self.device) if self.ans_model is not self.model: self.ans_model.to(self.device) assert self.model.__class__.__name__ in ["T5ForConditionalGeneration", "BartForConditionalGeneration"] if "T5ForConditionalGeneration" in self.model.__class__.__name__: self.model_type = "t5" else: self.model_type = "bart" def __call__(self, inputs: str): inputs = " ".join(inputs.split()) sents, answers = self._extract_answers(inputs) flat_answers = list(itertools.chain(*answers)) if len(flat_answers) == 0: return [] if self.qg_format == "prepend": qg_examples = self._prepare_inputs_for_qg_from_answers_prepend(inputs, answers) else: qg_examples = self._prepare_inputs_for_qg_from_answers_hl(sents, answers) qg_inputs = [example['source_text'] for example in qg_examples] questions = self._generate_questions(qg_inputs) output = [{'answer': example['answer'], 'question': que} for example, que in zip(qg_examples, questions)] return output def _generate_questions(self, inputs): inputs = self._tokenize(inputs, padding=True, truncation=True) outs = self.model.generate( input_ids=inputs['input_ids'].to(self.device), attention_mask=inputs['attention_mask'].to(self.device), max_length=32, num_beams=4, ) questions = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in outs] return questions def _extract_answers(self, context): sents, inputs = self._prepare_inputs_for_ans_extraction(context) inputs = self._tokenize(inputs, padding=True, truncation=True) outs = self.ans_model.generate( input_ids=inputs['input_ids'].to(self.device), attention_mask=inputs['attention_mask'].to(self.device), max_length=32, ) dec = [self.ans_tokenizer.decode(ids, skip_special_tokens=False) for ids in outs] answers = [item.split('<sep>') for item in dec] answers = [i[:-1] for i in answers] return sents, answers def _tokenize(self, inputs, padding=True, truncation=True, add_special_tokens=True, max_length=512 ): inputs = self.tokenizer.batch_encode_plus( inputs, max_length=max_length, add_special_tokens=add_special_tokens, truncation=truncation, padding="max_length" if padding else False, pad_to_max_length=padding, return_tensors="pt" ) return inputs def _prepare_inputs_for_ans_extraction(self, text): sents = sent_tokenize(text) inputs = [] for i in range(len(sents)): source_text = "extract answers:" for j, sent in enumerate(sents): if i == j: sent = "<hl> %s <hl>" % sent source_text = "%s %s" % (source_text, sent) source_text = source_text.strip() if self.model_type == "t5": source_text = source_text + " </s>" inputs.append(source_text) return sents, inputs def _prepare_inputs_for_qg_from_answers_hl(self, sents, answers): inputs = [] for i, answer in enumerate(answers): if len(answer) == 0: continue for answer_text in answer: sent = sents[i] sents_copy = sents[:] answer_text = answer_text.strip() ans_start_idx = 0 # ans_start_idx = 
sent.index(answer_text) # if answer_text in sent: # ans_start_idx = sent.index(answer_text) # else: # continue sent = f"{sent[:ans_start_idx]} <hl> {answer_text} <hl> {sent[ans_start_idx + len(answer_text): ]}" sents_copy[i] = sent source_text = " ".join(sents_copy) source_text = f"generate question: {source_text}" if self.model_type == "t5": source_text = source_text + " </s>" inputs.append({"answer": answer_text, "source_text": source_text}) return inputs def _prepare_inputs_for_qg_from_answers_prepend(self, context, answers): flat_answers = list(itertools.chain(*answers)) examples = [] for answer in flat_answers: source_text = f"answer: {answer} context: {context}" if self.model_type == "t5": source_text = source_text + " </s>" examples.append({"answer": answer, "source_text": source_text}) return examples class MultiTaskQAQGPipeline(QGPipeline): def __init__(self, **kwargs): super().__init__(**kwargs) def __call__(self, inputs: Union[Dict, str]): if type(inputs) is str: # do qg return super().__call__(inputs) else: # do qa return self._extract_answer(inputs["question"], inputs["context"]) def _prepare_inputs_for_qa(self, question, context): source_text = f"question: {question} context: {context}" if self.model_type == "t5": source_text = source_text + " </s>" return source_text def _extract_answer(self, question, context): source_text = self._prepare_inputs_for_qa(question, context) inputs = self._tokenize([source_text], padding=False) outs = self.model.generate( input_ids=inputs['input_ids'].to(self.device), attention_mask=inputs['attention_mask'].to(self.device), max_length=16, ) answer = self.tokenizer.decode(outs[0], skip_special_tokens=True) return answer class E2EQGPipeline: def __init__( self, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, use_cuda: bool ) : self.model = model self.tokenizer = tokenizer self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu" self.model.to(self.device) assert self.model.__class__.__name__ in ["T5ForConditionalGeneration", "BartForConditionalGeneration"] if "T5ForConditionalGeneration" in self.model.__class__.__name__: self.model_type = "t5" else: self.model_type = "bart" self.default_generate_kwargs = { "max_length": 256, "num_beams": 4, "length_penalty": 1.5, "no_repeat_ngram_size": 3, "early_stopping": True, } def __call__(self, context: str, **generate_kwargs): inputs = self._prepare_inputs_for_e2e_qg(context) # TODO: when overrding default_generate_kwargs all other arguments need to be passsed # find a better way to do this if not generate_kwargs: generate_kwargs = self.default_generate_kwargs input_length = inputs["input_ids"].shape[-1] # max_length = generate_kwargs.get("max_length", 256) # if input_length < max_length: # logger.warning( # "Your max_length is set to {}, but you input_length is only {}. You might consider decreasing max_length manually, e.g. 
summarizer('...', max_length=50)".format( # max_length, input_length # ) # ) outs = self.model.generate( input_ids=inputs['input_ids'].to(self.device), attention_mask=inputs['attention_mask'].to(self.device), **generate_kwargs ) prediction = self.tokenizer.decode(outs[0], skip_special_tokens=True) questions = prediction.split("<sep>") questions = [question.strip() for question in questions[:-1]] return questions def _prepare_inputs_for_e2e_qg(self, context): source_text = f"generate questions: {context}" if self.model_type == "t5": source_text = source_text + " </s>" inputs = self._tokenize([source_text], padding=False) return inputs def _tokenize( self, inputs, padding=True, truncation=True, add_special_tokens=True, max_length=512 ): inputs = self.tokenizer.batch_encode_plus( inputs, max_length=max_length, add_special_tokens=add_special_tokens, truncation=truncation, padding="max_length" if padding else False, pad_to_max_length=padding, return_tensors="pt" ) return inputs SUPPORTED_TASKS = { "question-generation": { "impl": QGPipeline, "default": { "model": "valhalla/t5-small-qg-hl", "ans_model": "valhalla/t5-small-qa-qg-hl", } }, "multitask-qa-qg": { "impl": MultiTaskQAQGPipeline, "default": { "model": "valhalla/t5-small-qa-qg-hl", } }, "e2e-qg": { "impl": E2EQGPipeline, "default": { "model": "valhalla/t5-small-e2e-qg", } } } def pipeline( task: str, model: Optional = None, tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None, qg_format: Optional[str] = "highlight", ans_model: Optional = None, ans_tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None, use_cuda: Optional[bool] = True, **kwargs, ): # Retrieve the task if task not in SUPPORTED_TASKS: raise KeyError("Unknown task {}, available tasks are {}".format(task, list(SUPPORTED_TASKS.keys()))) targeted_task = SUPPORTED_TASKS[task] task_class = targeted_task["impl"] # Use default model/config/tokenizer for the task if no model is provided if model is None: model = targeted_task["default"]["model"] # Try to infer tokenizer from model or config name (if provided as str) if tokenizer is None: if isinstance(model, str): tokenizer = model else: # Impossible to guest what is the right tokenizer here raise Exception( "Impossible to guess which tokenizer to use. " "Please provided a PretrainedTokenizer class or a path/identifier to a pretrained tokenizer." ) # Instantiate tokenizer if needed if isinstance(tokenizer, (str, tuple)): if isinstance(tokenizer, tuple): # For tuple we have (tokenizer name, {kwargs}) tokenizer = AutoTokenizer.from_pretrained(tokenizer[0], **tokenizer[1]) else: tokenizer = AutoTokenizer.from_pretrained(tokenizer) # Instantiate model if needed if isinstance(model, str): model = AutoModelForSeq2SeqLM.from_pretrained(model) if task == "question-generation": if ans_model is None: # load default ans model ans_model = targeted_task["default"]["ans_model"] ans_tokenizer = AutoTokenizer.from_pretrained(ans_model) ans_model = AutoModelForSeq2SeqLM.from_pretrained(ans_model) else: # Try to infer tokenizer from model or config name (if provided as str) if ans_tokenizer is None: if isinstance(ans_model, str): ans_tokenizer = ans_model else: # Impossible to guest what is the right tokenizer here raise Exception( "Impossible to guess which tokenizer to use. " "Please provided a PretrainedTokenizer class or a path/identifier to a pretrained tokenizer." 
) # Instantiate tokenizer if needed if isinstance(ans_tokenizer, (str, tuple)): if isinstance(ans_tokenizer, tuple): # For tuple we have (tokenizer name, {kwargs}) ans_tokenizer = AutoTokenizer.from_pretrained(ans_tokenizer[0], **ans_tokenizer[1]) else: ans_tokenizer = AutoTokenizer.from_pretrained(ans_tokenizer) if isinstance(ans_model, str): ans_model = AutoModelForSeq2SeqLM.from_pretrained(ans_model) if task == "e2e-qg": return task_class(model=model, tokenizer=tokenizer, use_cuda=use_cuda) elif task == "question-generation": return task_class(model=model, tokenizer=tokenizer, ans_model=ans_model, ans_tokenizer=ans_tokenizer, qg_format=qg_format, use_cuda=use_cuda) else: return task_class(model=model, tokenizer=tokenizer, ans_model=model, ans_tokenizer=tokenizer, qg_format=qg_format, use_cuda=use_cuda)
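The pipeline() factory above mirrors the Hugging Face pipeline API for question generation. A minimal usage sketch follows; it assumes the module is importable as pipelines, that the default valhalla/t5-small checkpoints can be downloaded from the Hugging Face hub, and that nltk's punkt data is available (downloaded below).

import nltk
nltk.download("punkt", quiet=True)       # sent_tokenize in QGPipeline needs punkt
from pipelines import pipeline           # import path is an assumption about packaging

nlp = pipeline("question-generation")    # answer extraction + question generation
print(nlp("Python was created by Guido van Rossum and first released in 1991."))

nlp_e2e = pipeline("e2e-qg")             # single-model end-to-end variant
print(nlp_e2e("Python was created by Guido van Rossum and first released in 1991."))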
__init__.py
null
__init__.py
null
DRL_train.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import numpy as np import pandas as pd import time import sys import logging from reinforcement.agents.ddqn import TrainDDQN from reinforcement.agents.dqn import TrainDQN from reinforcement.utils import rounded_dict from tensorflow.keras.layers import Dense, Dropout from sklearn.model_selection import train_test_split from learner.machinelearning import machinelearning from learner.aion_matrix import aion_matrix from reinforcement.metrics import network_predictions from learner.machinelearning import machinelearning os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # CPU is faster than GPU on structured data #def TrainRL(input_csv_file, model_save_path, rl_config, RL_Algo_Name): class ReinformentLearning(): def __init__(self,rlConfig,scoreParam,modelType): self.rl_config= rlConfig self.scoreParam = scoreParam self.log = logging.getLogger('eion') self.modelType = modelType def TrainRL(self,xtrain,ytrain,xtest,ytest,algorithm,deployLocation): try: scoredetails = '' X_train, xval, y_train, yval = train_test_split(xtrain, ytrain, test_size=0.2, stratify=ytrain) X_train = np.array(X_train) y_train = np.array(y_train) xval = np.array(xval) yval = np.array(yval) valueCount=ytrain.value_counts() categoryCountList=valueCount.tolist() xtest = np.array(xtest) ytest = np.array(ytest) objClf = aion_matrix() episodes = self.rl_config['episodes'] # Total number of episodes warmup_steps = self.rl_config['warmup_steps'] # Amount of warmup steps to collect data with random policy memory_length = warmup_steps # Max length of the Replay Memory batch_size = self.rl_config['batch_size'] collect_steps_per_episode = self.rl_config['collect_steps_per_episode'] collect_every = self.rl_config['collect_every'] target_update_period = self.rl_config['target_update_period'] # Period to overwrite the target Q-network with the default Q-network target_update_tau = self.rl_config['target_update_tau'] # Soften the target model update n_step_update = self.rl_config['n_step_update'] learning_rate = self.rl_config['learning_rate'] # Learning rate gamma = self.rl_config['gamma'] # Discount factor min_epsilon = self.rl_config['min_epsilon'] # Minimal and final chance of choosing random action decay_episodes = episodes // 10 # Number of episodes to decay from 1.0 to `min_epsilon`` layers = [Dense(128, activation="relu"), #need modification Dense(64, activation="relu"), Dense(32, activation="relu"), Dense(len(np.unique(y_train)), activation=None)] logFilePath=os.path.join(deployLocation,'log') if algorithm == "DQN": start = time.time() modelName = "DQN" model_save_path = os.path.dirname(__file__) model = TrainDQN(episodes, warmup_steps, learning_rate, gamma, min_epsilon, decay_episodes, target_update_period=target_update_period,target_update_tau=target_update_tau, batch_size=batch_size, collect_steps_per_episode=collect_steps_per_episode,memory_length=memory_length, collect_every=collect_every, n_step_update=n_step_update, model_path=model_save_path,log_dir=logFilePath) 
model.compile_model(X_train,y_train,layers) model.q_net.summary() model.train(xval,yval) network = model.get_network() predictedytrain=network_predictions(network,np.array(xtrain)) predictedytest = network_predictions(network,np.array(xtest)) if "DDQN" == algorithm: start = time.time() modelName = "DDQN" model = TrainDDQN(episodes, warmup_steps, learning_rate, gamma, min_epsilon, decay_episodes, target_update_period=target_update_period,target_update_tau=target_update_tau, batch_size=batch_size, collect_steps_per_episode=collect_steps_per_episode,memory_length=memory_length, collect_every=collect_every, n_step_update=n_step_update,log_dir=logFilePath) model.compile_model(X_train,y_train,layers) model.q_net.summary() model.train(xval,yval) network = model.get_network() predictedytrain=network_predictions(network,np.array(xtrain)) predictedytest = network_predictions(network,np.array(xtest)) score = objClf.get_score(self.scoreParam,ytest,predictedytest) score = round(score,2) return (network,self.rl_config,score,algorithm,-1,-1,-1) except Exception as inst: self.log.info( '\n-----> RL Failed!!!.'+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
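A minimal driver sketch for the ReinformentLearning wrapper above, assuming the class is importable from the path shown (a guess based on the repo's other imports), that "Accuracy" is an accepted scoreParam for aion_matrix.get_score, and using a synthetic scikit-learn dataset in place of real data.

import os
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from reinforcement.DRL_train import ReinformentLearning  # module path is an assumption

# Keys mirror what TrainRL reads from rl_config above; the values are illustrative only.
rl_config = {
    "episodes": 100, "warmup_steps": 500, "batch_size": 32,
    "collect_steps_per_episode": 50, "collect_every": 1,
    "target_update_period": 1, "target_update_tau": 1.0,
    "n_step_update": 1, "learning_rate": 1e-3, "gamma": 0.99,
    "min_epsilon": 0.05,
}

# Synthetic, imbalanced tabular data; ytrain must be a pandas Series (TrainRL calls value_counts()).
X, y = make_classification(n_samples=2000, n_features=10, weights=[0.9, 0.1], random_state=0)
X, y = pd.DataFrame(X), pd.Series(y)
xtrain, xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, stratify=y, random_state=0)

deploy_location = "./deploy"  # TrainRL writes its TensorBoard logs under <deploy_location>/log
os.makedirs(os.path.join(deploy_location, "log"), exist_ok=True)

rl = ReinformentLearning(rl_config, "Accuracy", "classification")  # scoreParam value is an assumption
network, used_config, score, algo, *_ = rl.TrainRL(xtrain, ytrain, xtest, ytest, "DQN", deploy_location)
print(algo, score)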
DRL_predict.py
import os import numpy as np import pandas as pd import time from DeepRL.agents.ddqn import TrainDDQN from DeepRL.agents.dqn import TrainDQN from DeepRL.dataprocess import get_train_test_val from DeepRL.utils import rounded_dict from tensorflow.keras.layers import Dense, Dropout from sklearn.model_selection import train_test_split os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # CPU is faster than GPU on structured data def PredictRL(input_csv_file, model_load_path, RL_hparams_config_file, RL_Algo_Name): if not (os.path.exists(model_load_path)): os.makedirs(model_load_path) episodes = RL_hparams_config_file['DeepRL']['episodes'] # Total number of episodes warmup_steps = RL_hparams_config_file['DeepRL']['warmup_steps'] # Amount of warmup steps to collect data with random policy memory_length = warmup_steps # Max length of the Replay Memory batch_size = RL_hparams_config_file['DeepRL']['batch_size'] collect_steps_per_episode = RL_hparams_config_file['DeepRL']['collect_steps_per_episode'] collect_every = RL_hparams_config_file['DeepRL']['collect_every'] target_update_period = RL_hparams_config_file['DeepRL']['target_update_period'] # Period to overwrite the target Q-network with the default Q-network target_update_tau = RL_hparams_config_file['DeepRL']['target_update_tau'] # Soften the target model update n_step_update = RL_hparams_config_file['DeepRL']['n_step_update'] learning_rate = RL_hparams_config_file['DeepRL']['learning_rate'] # Learning rate gamma = RL_hparams_config_file['DeepRL']['gamma'] # Discount factor min_epsilon = RL_hparams_config_file['DeepRL']['min_epsilon'] # Minimal and final chance of choosing random action decay_episodes = episodes // 10 # Number of episodes to decay from 1.0 to `min_epsilon`` #path = '/home/renith/Renith/Project/AION/Reinforcement/RL_Classification/Code/rl_text_classification/telemetry_data.csv' data = pd.read_csv(input_csv_file) device5 = data[data['device_id'] == "Device_1"] device5 = device5.drop(['device_id'], axis = 1) device5.reset_index(drop=True, inplace=True) target_value = [] for i in range(device5['device_status'].shape[0]): if(device5['device_status'][i] == "NORMAL"): target_value.append(0.0) else: target_value.append(1.0) device5['target'] = target_value device5 = device5.drop(['device_status'], axis = 1) X_test = device5.iloc[:,1:-1] y_test = device5.iloc[:,-1] X_test = X_test.astype(np.float32) y_test = y_test.astype(np.int32) #Normalization mini, maxi = X_test.min(axis=0), X_test.max(axis=0) X_test -= mini X_test /= maxi - mini min_class = [1] #Minority class maj_class = [0] #Majority class #X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.8, stratify=y_train) #X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, stratify=y_train) #X_train = np.array(X_train) #y_train = np.array(y_train) #X_val = np.array(X_val) #y_val = np.array(y_val) X_test = np.array(X_test) y_test = np.array(y_test) #X_train, y_train, X_test, y_test, X_val, y_val = get_train_test_val(X_train.values, y_train.values, X_test.values, y_test.values, # min_class, maj_class, val_frac=0.2) layers = [Dense(128, activation="relu"), Dense(64, activation="relu"), Dense(32, activation="relu"), Dense(2, activation=None)] if(RL_Algo_Name == "DDQN"): model = TrainDDQN(episodes, warmup_steps, learning_rate, gamma, min_epsilon, decay_episodes, target_update_period=target_update_period, target_update_tau=target_update_tau, batch_size=batch_size, collect_steps_per_episode=collect_steps_per_episode, memory_length=memory_length, 
collect_every=collect_every, n_step_update=n_step_update, model_path=model_load_path) elif(RL_Algo_Name == "DQN"): model = TrainDQN(episodes, warmup_steps, learning_rate, gamma, min_epsilon, decay_episodes, target_update_period=target_update_period, target_update_tau=target_update_tau, batch_size=batch_size, collect_steps_per_episode=collect_steps_per_episode, memory_length=memory_length, collect_every=collect_every, n_step_update=n_step_update, model_path=model_load_path) model.compile_model(X_test, y_test, layers) model.q_net.summary() #model.train(X_val, y_val, "F1") #print("Training Ended !!!!") stats = model.evaluate(X_test, y_test) print(rounded_dict(stats)) #stats = model.evaluate(X_train, y_train) #print(rounded_dict(stats))
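An illustrative call to PredictRL above. The csv path, model directory and hyperparameter values are assumptions; the file itself is expected to carry the 'device_id' and 'device_status' columns that the preprocessing above hard-codes.

# Hyperparameters are read from config["DeepRL"], matching the lookups inside PredictRL.
rl_config = {
    "DeepRL": {
        "episodes": 100, "warmup_steps": 500, "batch_size": 32,
        "collect_steps_per_episode": 50, "collect_every": 1,
        "target_update_period": 1, "target_update_tau": 1.0,
        "n_step_update": 1, "learning_rate": 1e-3, "gamma": 0.99,
        "min_epsilon": 0.05,
    }
}

# Assumed paths: a telemetry csv with device_id/device_status columns, and a directory
# holding the serialized agent (created if it does not exist).
PredictRL("telemetry_data.csv", "./models", rl_config, "DDQN")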
__init__.py
null
metrics.py
import matplotlib.pyplot as plt import numpy as np import seaborn as sns from sklearn.metrics import (auc, average_precision_score, confusion_matrix, f1_score, precision_recall_curve, roc_curve,precision_score,recall_score) from tensorflow import constant from tf_agents.trajectories import time_step def network_predictions(network, X: np.ndarray) -> dict: """Computes y_pred using a given network. Input is array of data entries. :param network: The network to use to calculate metrics :type network: (Q)Network :param X: X data, input to network :type X: np.ndarray :return: Numpy array of predicted targets for given X :rtype: np.ndarray """ if not isinstance(X, np.ndarray): raise ValueError(f"`X` must be of type `np.ndarray` not {type(X)}") q, _ = network(X, step_type=constant([time_step.StepType.FIRST] * X.shape[0]), training=False) return np.argmax(q.numpy(), axis=1) # Max action for each x in X def decision_function(network, X: np.ndarray) -> dict: """Computes the score for the predicted class of each x in X using a given network. Input is array of data entries. :param network: The network to use to calculate the score per x in X :type network: (Q)Network :param X: X data, input to network :type X: np.ndarray :return: Numpy array of scores for given X :rtype: np.ndarray """ if not isinstance(X, np.ndarray): raise ValueError(f"`X` must be of type `np.ndarray` not {type(X)}") q, _ = network(X, step_type=constant([time_step.StepType.FIRST] * X.shape[0]), training=False) return np.max(q.numpy(), axis=1) # Value of max action for each x in X def classification_metrics(y_true: list, y_pred: list) -> dict: """Computes metrics using y_true and y_pred. :param y_true: True labels :type y_true: np.ndarray :param y_pred: Predicted labels, corresponding to y_true :type y_pred: np.ndarray :return: Dictionairy containing Geometric Mean, F1, Precision, Recall, TP, TN, FP, FN :rtype: dict """ if not isinstance(y_true, (list, tuple, np.ndarray)): raise ValueError(f"`y_true` must be of type `list` not {type(y_true)}") if not isinstance(y_pred, (list, tuple, np.ndarray)): raise ValueError(f"`y_pred` must be of type `list` not {type(y_pred)}") if len(y_true) != len(y_pred): raise ValueError("`X` and `y` must be of same length.") #G_mean = np.sqrt(recall * specificity) # Geometric mean of recall and specificity F1 = f1_score(y_true, y_pred, average='macro') # Default F-measure recall = recall_score(y_true,y_pred,average='macro') precision = precision_score(y_true,y_pred,average='macro') return {"F1": F1, "Precision": precision, "Recall": recall} def plot_pr_curve(network, X_test: np.ndarray, y_test: np.ndarray, X_train: np.ndarray = None, y_train: np.ndarray = None) -> None: # pragma: no cover """Plots PR curve of X_test and y_test of given network. Optionally plots PR curve of X_train and y_train. Average precision is shown in the legend. 
:param network: The network to use to calculate the PR curve :type network: (Q)Network :param X_test: X data, input to network :type X_test: np.ndarray :param y_test: True labels for `X_test` :type y_test: np.ndarray :param X_train: Optional X data to plot validation PR curve :type X_train: np.ndarray :param y_train: True labels for `X_val` :type y_train: np.ndarray :return: None :rtype: NoneType """ plt.plot((0, 1), (1, 0), color="black", linestyle="--", label="Baseline") # TODO: Consider changing baseline if X_train is not None and y_train is not None: y_val_score = decision_function(network, X_train) val_precision, val_recall, _ = precision_recall_curve(y_train, y_val_score) val_AP = average_precision_score(y_train, y_val_score) plt.plot(val_recall, val_precision, label=f"Train AP: {val_AP:.3f}") y_test_score = decision_function(network, X_test) test_precision, test_recall, _ = precision_recall_curve(y_test, y_test_score) test_AP = average_precision_score(y_test, y_test_score) plt.plot(test_recall, test_precision, label=f"Test AP: {test_AP:.3f}") plt.xlim((-0.05, 1.05)) plt.ylim((-0.05, 1.05)) plt.xlabel("Recall") plt.ylabel("Precision") plt.title("PR Curve") plt.gca().set_aspect("equal", adjustable="box") plt.legend(loc="lower left") plt.grid(True) plt.show() def plot_roc_curve(network, X_test: np.ndarray, y_test: np.ndarray, X_train: np.ndarray = None, y_train: np.ndarray = None) -> None: # pragma: no cover """Plots ROC curve of X_test and y_test of given network. Optionally plots ROC curve of X_train and y_train. Average precision is shown in the legend. :param network: The network to use to calculate the PR curve :type network: (Q)Network :param X_test: X data, input to network :type X_test: np.ndarray :param y_test: True labels for `X_test` :type y_test: np.ndarray :param X_train: Optional X data to plot validation PR curve :type X_train: np.ndarray :param y_train: True labels for `X_val` :type y_train: np.ndarray :return: None :rtype: NoneType """ plt.plot((0, 1), (0, 1), color="black", linestyle="--", label="Baseline") # TODO: Consider changing baseline if X_train is not None and y_train is not None: y_train_score = decision_function(network, X_train) fpr_train, tpr_train, _ = roc_curve(y_train, y_train_score) plt.plot(fpr_train, tpr_train, label=f"Train AUROC: {auc(fpr_train, tpr_train):.2f}") y_test_score = decision_function(network, X_test) fpr_test, tpr_test, _ = roc_curve(y_test, y_test_score) plt.plot(fpr_test, tpr_test, label=f"Test AUROC: {auc(fpr_test, tpr_test):.2f}") plt.xlim((-0.05, 1.05)) plt.ylim((-0.05, 1.05)) plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("ROC Curve") plt.gca().set_aspect("equal", adjustable="box") plt.legend(loc="lower right") plt.grid(True) plt.show() def plot_confusion_matrix(TP: int, FN: int, FP: int, TN: int) -> None: # pragma: no cover """Plots confusion matric of given TP, FN, FP, TN. :param TP: True Positive :type TP: int :param FN: False Negative :type FN: int :param FP: False Positive :type FP: int :param TN: True Negative :type TN: int :return: None :rtype: NoneType """ if not all(isinstance(i, (int, np.integer)) for i in (TP, FN, FP, TN)): raise ValueError("Not all arguments are integers.") ticklabels = ("Minority", "Majority") sns.heatmap(((TP, FN), (FP, TN)), annot=True, fmt="_d", cmap="viridis", xticklabels=ticklabels, yticklabels=ticklabels) plt.title("Confusion matrix") plt.xlabel("Predicted labels") plt.ylabel("True labels") plt.show()
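A quick, self-contained check of classification_metrics from the module above; network_predictions and decision_function additionally need a trained (Q)Network, so only the label-based helper is exercised here. The import path follows the way dqn.py and ddqn.py import this module.

import numpy as np
from reinforcement.metrics import classification_metrics  # same import style as dqn.py / ddqn.py

y_true = np.array([0, 0, 1, 1, 0, 1])
y_pred = np.array([0, 1, 1, 1, 0, 0])

# Macro-averaged F1 / Precision / Recall, as implemented above.
print(classification_metrics(y_true, y_pred))  # e.g. {'F1': 0.666..., 'Precision': 0.666..., 'Recall': 0.666...}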
utils.py
import os from typing import List import numpy as np import pandas as pd from sklearn.model_selection import train_test_split def split_csv(fp: str = "./data/creditcard.csv", fp_dest: str = "./data", name: str = "credit", test_size: float = 0.5, strat_col: str = "Class") -> None: """Splits a csv file in two, in a stratified fashion. Format for filenames will be `{name}0.csv` and `{name}1.csv`. :param fp: The path at which the csv file is located. :type fp: str :param fp_dest: The path to save the train and test files. :type fp_dest: str :param name: The prefix for the files. :type name: str :param test_size: The fraction of total size for the test file. :type test_size: float :param strat_col: The column in the original csv file to stratify. :return: None, two files located at `fp_dest`. :rtype: NoneType """ if not os.path.isfile(fp): raise FileNotFoundError(f"File at {fp} does not exist.") if not os.path.isdir(fp_dest): raise ValueError(f"Directory at {fp_dest} does not exist.") if not 0 < test_size < 1: raise ValueError(f"{test_size} is not in interval 0 < x < 1.") df = pd.read_csv(fp) if not (strat_col in df.columns): raise ValueError(f"Stratify column {strat_col} not found in DataFrame.") train, test = train_test_split(df, test_size=test_size, stratify=df[strat_col]) train.to_csv(f"{fp_dest}/{name}0.csv", index=False) test.to_csv(f"{fp_dest}/{name}1.csv", index=False) def rounded_dict(d: dict, precision: int = 6) -> dict: """Rounds all values in a dictionary to `precision` digits after the decimal point. :param d: Dictionary containing only floats or ints as values :type d: dict :return: Rounded dictionary :rtype: dict """ return {k: round(v, precision) for k, v in d.items()} def imbalance_ratio(y: np.ndarray, min_classes: List[int] = [1], maj_classes: List[int] = [0]) -> float: """Calculates imbalance ratio of minority class(es) and majority class(es). :param y: y-vector with labels. :type y: np.ndarray :param min_classes: The labels of the minority classes :type min_classes: list :param maj_classes: The labels of the majority classes :type maj_classes: list :return: The imbalance ratio :rtype: float """ return np.isin(y, min_classes).sum() / np.isin(y, maj_classes).sum()
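Both pure-Python helpers above can be tried directly; the import path matches how dqn.py and DRL_train.py pull them in.

import numpy as np
from reinforcement.utils import imbalance_ratio, rounded_dict  # same import style as dqn.py / DRL_train.py

y = np.array([0] * 90 + [1] * 10)
print(imbalance_ratio(y))                                            # 10 / 90 ≈ 0.1111 with the default [1] vs [0] classes
print(rounded_dict({"F1": 0.8765432, "Recall": 0.912345678}, precision=3))  # {'F1': 0.877, 'Recall': 0.912}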
dataprocess.py
import os from typing import List, Tuple import numpy as np from pandas import read_csv from sklearn.model_selection import train_test_split from sklearn.utils import shuffle from tensorflow.keras.datasets import cifar10, fashion_mnist, imdb, mnist from tensorflow.keras.preprocessing.sequence import pad_sequences TrainTestData = Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray] TrainTestValData = Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray] def load_image(data_source: str) -> TrainTestData: """ Loads one of the following image datasets: {mnist, famnist, cifar10}. Normalizes the data. Returns X and y for both train and test datasets. Dtypes of X's and y's will be `float32` and `int32` to be compatible with `tf_agents`. :param data_source: Either mnist, famnist or cifar10 :type data_source: str :return: Tuple of (X_train, y_train, X_test, y_test) containing original split of train/test :rtype: tuple """ reshape_shape = -1, 28, 28, 1 if data_source == "mnist": (X_train, y_train), (X_test, y_test) = mnist.load_data() elif data_source == "famnist": (X_train, y_train), (X_test, y_test) = fashion_mnist.load_data() elif data_source == "cifar10": (X_train, y_train), (X_test, y_test) = cifar10.load_data() reshape_shape = -1, 32, 32, 3 else: raise ValueError("No valid `data_source`.") X_train = X_train.reshape(reshape_shape).astype(np.float32) # Float32 is the expected dtype for the observation spec in the env X_test = X_test.reshape(reshape_shape).astype(np.float32) X_train /= 255 # /= is not available when casting int to float: https://stackoverflow.com/a/48948461/10603874 X_test /= 255 y_train = y_train.reshape(y_train.shape[0], ).astype(np.int32) y_test = y_test.reshape(y_test.shape[0], ).astype(np.int32) return X_train, y_train, X_test, y_test def load_csv(fp_train: str, fp_test: str, label_col: str, drop_cols: List[str], normalization: bool = False) -> TrainTestData: """ Loads any csv-file from local filepaths. Returns X and y for both train and test datasets. Option to normalize the data with min-max normalization. Only csv-files with float32 values for the features and int32 values for the labels supported. Source for dataset: https://mimic-iv.mit.edu/ :param fp_train: Location of the train csv-file :type fp_train: str :param fp_test: Location of the test csv-file :type fp_test: str :param label_col: The name of the column containing the labels of the data :rtype label_col: str :param drop_cols: List of the names of the columns to be dropped. `label_col` gets dropped automatically :rtype drop_cols: List of strings :param normalization: Normalize the data with min-max normalization? 
:type normalization: bool :return: Tuple of (X_train, y_train, X_test, y_test) containing original split of train/test :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray] """ if not os.path.isfile(fp_train): raise FileNotFoundError(f"`fp_train` {fp_train} does not exist.") if not os.path.isfile(fp_test): raise FileNotFoundError(f"`fp_test` {fp_test} does not exist.") if not isinstance(normalization, bool): raise TypeError(f"`normalization` must be of type `bool`, not {type(normalization)}") X_train = read_csv(fp_train).astype(np.float32) # DataFrames directly converted to float32 X_test = read_csv(fp_test).astype(np.float32) y_train = X_train[label_col].astype(np.int32) y_test = X_test[label_col].astype(np.int32) X_train.drop(columns=drop_cols + [label_col], inplace=True) # Dropping cols and label column X_test.drop(columns=drop_cols + [label_col], inplace=True) # Other data sources are already normalized. RGB values are always in range 0 to 255. if normalization: mini, maxi = X_train.min(axis=0), X_train.max(axis=0) X_train -= mini X_train /= maxi - mini X_test -= mini X_test /= maxi - mini return X_train.values, y_train.values, X_test.values, y_test.values # Numpy arrays def load_imdb(config: Tuple[int, int] = (5_000, 500)) -> TrainTestData: """Loads the IMDB dataset. Returns X and y for both train and test datasets. :param config: Tuple of number of most frequent words and max length of each sequence. :type config: str :return: Tuple of (X_train, y_train, X_test, y_test) containing original split of train/test :rtype: tuple """ if not isinstance(config, (tuple, list)): raise TypeError(f"{type(config)} is no valid datatype for `config`.") if len(config) != 2: raise ValueError("Tuple length of `config` must be 2.") if not all(i > 0 for i in config): raise ValueError("All integers of `config` must be > 0.") (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=config[0]) X_train = pad_sequences(X_train, maxlen=config[1]) X_test = pad_sequences(X_test, maxlen=config[1]) y_train = y_train.astype(np.int32) y_test = y_test.astype(np.int32) return X_train, y_train, X_test, y_test def get_train_test_val(X_train: np.ndarray, y_train: np.ndarray, X_test: np.ndarray, y_test: np.ndarray, min_classes: List[int], maj_classes: List[int], imb_ratio: float = None, imb_test: bool = True, val_frac: float = 0.25, print_stats: bool = True) -> TrainTestValData: """ Imbalances data and divides the data into train, test and validation sets. The imbalance rate of each individual dataset is approx. the same as the given `imb_ratio`. :param X_train: The X_train data :type X_train: np.ndarray :param y_train: The y_train data :type y_train: np.ndarray :param X_test: The X_test data :type X_test: np.ndarray :param y_test: The y_test data :type y_test: np.ndarray :param min_classes: List of labels of all minority classes :type min_classes: list :param maj_classes: List of labels of all majority classes. :type maj_classes: list :param imb_ratio: Imbalance ratio for minority to majority class: len(minority datapoints) / len(majority datapoints) If the `imb_ratio` is None, data will not be imbalanced and will only be relabeled to 1's and 0's. :type imb_ratio: float :param imb_test: Imbalance the test dataset? :type imb_test: bool :param val_frac: Fraction to take from X_train and y_train for X_val and y_val :type val_frac: float :param print_stats: Print the imbalance ratio of the imbalanced data? 
:type print_stats: bool :return: Tuple of (X_train, y_train, X_test, y_test, X_val, y_val) :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray] """ if not 0 < val_frac < 1: raise ValueError(f"{val_frac} is not in interval 0 < x < 1.") if not isinstance(print_stats, bool): raise TypeError(f"`print_stats` must be of type `bool`, not {type(print_stats)}.") X_train, y_train = imbalance_data(X_train, y_train, min_classes, maj_classes, imb_ratio=imb_ratio) # Only imbalance test-data if imb_test is True X_test, y_test = imbalance_data(X_test, y_test, min_classes, maj_classes, imb_ratio=imb_ratio if imb_test else None) # stratify=y_train to ensure class balance is kept between train and validation datasets X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=val_frac, stratify=y_train) if print_stats: p_train, p_test, p_val = [((y == 1).sum(), imbalance_ratio(y)) for y in (y_train, y_test, y_val)] print(f"Imbalance ratio `p`:\n" f"\ttrain: n={p_train[0]}, p={p_train[1]:.6f}\n" f"\ttest: n={p_test[0]}, p={p_test[1]:.6f}\n" f"\tvalidation: n={p_val[0]}, p={p_val[1]:.6f}") return X_train, y_train, X_test, y_test, X_val, y_val def imbalance_data(X: np.ndarray, y: np.ndarray, min_class: List[int], maj_class: List[int], imb_ratio: float = None) -> Tuple[np.ndarray, np.ndarray]: """ Split data in minority and majority, only values in {min_class, maj_class} will be kept. (Possibly) decrease minority rows to match the imbalance rate. If initial imb_ratio of dataset is lower than given `imb_ratio`, the imb_ratio of the returned data will not be changed. If the `imb_ratio` is None, data will not be imbalanced and will only be relabeled to 1's and 0's. """ if not isinstance(X, np.ndarray): raise TypeError(f"`X` must be of type `np.ndarray` not {type(X)}") if not isinstance(y, np.ndarray): raise TypeError(f"`y` must be of type `np.ndarray` not {type(y)}") if X.shape[0] != y.shape[0]: raise ValueError("`X` and `y` must contain the same amount of rows.") if not isinstance(min_class, (list, tuple)): raise TypeError("`min_class` must be of type list or tuple.") if not isinstance(maj_class, (list, tuple)): raise TypeError("`maj_class` must be of type list or tuple.") if (imb_ratio is not None) and not (0 < imb_ratio < 1): raise ValueError(f"{imb_ratio} is not in interval 0 < imb_ratio < 1.") if imb_ratio is None: # Do not imbalance data if no `imb_ratio` is given imb_ratio = 1 X_min = X[np.isin(y, min_class)] # Mask the correct indexes X_maj = X[np.isin(y, maj_class)] # Only keep data/labels for x in {min_class, maj_class} and forget all other min_len = int(X_maj.shape[0] * imb_ratio) # Amount of rows to select from minority classes to get to correct imbalance ratio # Keep all majority rows, decrease minority rows to match `imb_ratio` X_min = X_min[np.random.choice(X_min.shape[0], min(min_len, X_min.shape[0]), replace=False), :] X_imb = np.concatenate([X_maj, X_min]).astype(np.float32) y_imb = np.concatenate((np.zeros(X_maj.shape[0]), np.ones(X_min.shape[0]))).astype(np.int32) X_imb, y_imb = shuffle(X_imb, y_imb) return X_imb, y_imb
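A sketch of chaining load_image and get_train_test_val from the module above into an artificially imbalanced MNIST split. The import path mirrors DRL_predict.py; running it downloads MNIST through Keras, and the class choice and imbalance ratio are arbitrary.

from DeepRL.dataprocess import get_train_test_val, load_image  # same package as used in DRL_predict.py

X_train, y_train, X_test, y_test = load_image("mnist")         # normalized float32 images, int32 labels

min_classes = [2]                                               # treat digit 2 as the minority class (label 1)
maj_classes = [0, 1, 3, 4, 5, 6, 7, 8, 9]                       # everything else is relabeled to 0

X_train, y_train, X_test, y_test, X_val, y_val = get_train_test_val(
    X_train, y_train, X_test, y_test,
    min_classes, maj_classes,
    imb_ratio=0.01,     # keep roughly 1 minority sample per 100 majority samples
    val_frac=0.25)      # 25% of the (imbalanced) training data becomes the validation split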
__init__.py
null
dqn.py
import os import pickle from datetime import datetime import numpy as np import tensorflow as tf from reinforcement.environments.classifierenv import ClassifierEnv from reinforcement.metrics import (classification_metrics, decision_function, network_predictions, plot_pr_curve, plot_roc_curve) from reinforcement.utils import imbalance_ratio from tensorflow import data from tensorflow.keras.optimizers import Adam #from tf_agents.agents.dqn.dqn_agent import DdqnAgent from tf_agents.agents import DqnAgent from tf_agents.drivers.dynamic_step_driver import DynamicStepDriver from tf_agents.environments.tf_py_environment import TFPyEnvironment from tf_agents.networks.sequential import Sequential from tf_agents.policies.random_tf_policy import RandomTFPolicy from tf_agents.replay_buffers.tf_uniform_replay_buffer import \ TFUniformReplayBuffer from tf_agents.utils import common class TrainDQN(): """Wrapper for DDQN training, validation, saving etc.""" def __init__(self, episodes: int, warmup_steps: int, learning_rate: float, gamma: float, min_epsilon: float, decay_episodes: int, model_path: str = None, log_dir: str = None, batch_size: int = 64, memory_length: int = None, collect_steps_per_episode: int = 1, val_every: int = None, target_update_period: int = 1, target_update_tau: float = 1.0, progressbar: bool = True, n_step_update: int = 1, gradient_clipping: float = 1.0, collect_every: int = 1) -> None: """ Wrapper to make training easier. Code is partly based of https://www.tensorflow.org/agents/tutorials/1_dqn_tutorial :param episodes: Number of training episodes :type episodes: int :param warmup_steps: Number of episodes to fill Replay Buffer with random state-action pairs before training starts :type warmup_steps: int :param learning_rate: Learning Rate for the Adam Optimizer :type learning_rate: float :param gamma: Discount factor for the Q-values :type gamma: float :param min_epsilon: Lowest and final value for epsilon :type min_epsilon: float :param decay_episodes: Amount of episodes to decay from 1 to `min_epsilon` :type decay_episodes: int :param model_path: Location to save the trained model :type model_path: str :param log_dir: Location to save the logs, usefull for TensorBoard :type log_dir: str :param batch_size: Number of samples in minibatch to train on each step :type batch_size: int :param memory_length: Maximum size of the Replay Buffer :type memory_length: int :param collect_steps_per_episode: Amount of data to collect for Replay Buffer each episiode :type collect_steps_per_episode: int :param collect_every: Step interval to collect data during training :type collect_every: int :param val_every: Validate the model every X episodes using the `collect_metrics()` function :type val_every: int :param target_update_period: Update the target Q-network every X episodes :type target_update_period: int :param target_update_tau: Parameter for softening the `target_update_period` :type target_update_tau: float :param progressbar: Enable or disable the progressbar for collecting data and training :type progressbar: bool :return: None :rtype: NoneType """ self.episodes = episodes # Total episodes self.warmup_steps = warmup_steps # Amount of warmup steps before training self.batch_size = batch_size # Batch size of Replay Memory self.collect_steps_per_episode = collect_steps_per_episode # Amount of steps to collect data each episode self.collect_every = collect_every # Step interval to collect data during training self.learning_rate = learning_rate # Learning Rate self.gamma = gamma # Discount 
factor self.min_epsilon = min_epsilon # Minimal chance of choosing random action self.decay_episodes = decay_episodes # Number of episodes to decay from 1.0 to `EPSILON` self.target_update_period = target_update_period # Period for soft updates self.target_update_tau = target_update_tau self.progressbar = progressbar # Enable or disable the progressbar for collecting data and training self.n_step_update = n_step_update self.gradient_clipping = gradient_clipping # Clip the loss self.compiled = False NOW = "DQN" #datetime.now().strftime("%Y%m%d_%H%M%S") if memory_length is not None: self.memory_length = memory_length # Max Replay Memory length else: self.memory_length = warmup_steps if val_every is not None: self.val_every = val_every # Validate the policy every `val_every` episodes else: self.val_every = self.episodes // min(50, self.episodes) # Can't validate the model 50 times if self.episodes < 50 if model_path is not None: #if os.path.exists(model_path + "/" + NOW + ".pkl"): # os.remove(model_path + "/" + NOW + ".pkl") self.model_path = model_path + "/" + NOW + ".pkl" else: self.model_path = "./models/" + NOW + ".pkl" if log_dir is None: log_dir = "./logs/" + NOW self.writer = tf.summary.create_file_writer(log_dir) def compile_model(self, X_train, y_train, layers: list = [], imb_ratio: float = None, loss_fn=common.element_wise_squared_loss) -> None: """Initializes the neural networks, DDQN-agent, collect policies and replay buffer. :param X_train: Training data for the model. :type X_train: np.ndarray :param y_train: Labels corresponding to `X_train`. 1 for the positive class, 0 for the negative class. :param y_train: np.ndarray :param layers: List of layers to feed into the TF-agents custom Sequential(!) layer. :type layers: list :param imb_ratio: The imbalance ratio of the data. 
:type imb_ratio: float :param loss_fn: Callable loss function :type loss_fn: tf.compat.v1.losses :return: None :rtype: NoneType """ if imb_ratio is None: imb_ratio = imbalance_ratio(y_train) self.train_env = TFPyEnvironment(ClassifierEnv(X_train, y_train, imb_ratio)) self.global_episode = tf.Variable(0, name="global_episode", dtype=np.int64, trainable=False) # Global train episode counter # Custom epsilon decay: https://github.com/tensorflow/agents/issues/339 epsilon_decay = tf.compat.v1.train.polynomial_decay( 1.0, self.global_episode, self.decay_episodes, end_learning_rate=self.min_epsilon) self.q_net = Sequential(layers, self.train_env.observation_spec()) self.agent = DqnAgent(self.train_env.time_step_spec(), self.train_env.action_spec(), q_network=self.q_net, optimizer=Adam(learning_rate=self.learning_rate), td_errors_loss_fn=loss_fn, train_step_counter=self.global_episode, target_update_period=self.target_update_period, target_update_tau=self.target_update_tau, gamma=self.gamma, epsilon_greedy=epsilon_decay, n_step_update=self.n_step_update, gradient_clipping=self.gradient_clipping) self.agent.initialize() self.random_policy = RandomTFPolicy(self.train_env.time_step_spec(), self.train_env.action_spec()) self.replay_buffer = TFUniformReplayBuffer(data_spec=self.agent.collect_data_spec, batch_size=self.train_env.batch_size, max_length=self.memory_length) self.warmup_driver = DynamicStepDriver(self.train_env, self.random_policy, observers=[self.replay_buffer.add_batch], num_steps=self.warmup_steps) # Uses a random policy self.collect_driver = DynamicStepDriver(self.train_env, self.agent.collect_policy, observers=[self.replay_buffer.add_batch], num_steps=self.collect_steps_per_episode) # Uses the epsilon-greedy policy of the agent self.agent.train = common.function(self.agent.train) # Optimalization self.warmup_driver.run = common.function(self.warmup_driver.run) self.collect_driver.run = common.function(self.collect_driver.run) self.compiled = True def train(self, *args) -> None: """Starts the training of the model. Includes warmup period, metrics collection and model saving. :param *args: All arguments will be passed to `collect_metrics()`. This can be usefull to pass callables, testing environments or validation data. Overwrite the TrainDQN.collect_metrics() function to use your own *args. :type *args: Any :return: None :rtype: NoneType, last step is saving the model as a side-effect """ assert self.compiled, "Model must be compiled with model.compile_model(X_train, y_train, layers) before training." # Warmup period, fill memory with random actions if self.progressbar: print(f"\033[92mCollecting data for {self.warmup_steps:_} steps... 
This might take a few minutes...\033[0m") self.warmup_driver.run(time_step=None, policy_state=self.random_policy.get_initial_state(self.train_env.batch_size)) if self.progressbar: print(f"\033[92m{self.replay_buffer.num_frames():_} frames collected!\033[0m") dataset = self.replay_buffer.as_dataset(sample_batch_size=self.batch_size, num_steps=self.n_step_update + 1, num_parallel_calls=data.experimental.AUTOTUNE).prefetch(data.experimental.AUTOTUNE) iterator = iter(dataset) def _train(): experiences, _ = next(iterator) return self.agent.train(experiences).loss _train = common.function(_train) # Optimalization ts = None policy_state = self.agent.collect_policy.get_initial_state(self.train_env.batch_size) print('Before Collect Metrics') self.collect_metrics(*args) # Initial collection for step 0 print('After Collect Metrics') for _ in range(self.episodes): if not self.global_episode % self.collect_every: # Collect a few steps using collect_policy and save to `replay_buffer` if self.collect_steps_per_episode != 0: ts, policy_state = self.collect_driver.run(time_step=ts, policy_state=policy_state) # Sample a batch of data from `replay_buffer` and update the agent's network train_loss = _train() if not self.global_episode % self.val_every: with self.writer.as_default(): tf.summary.scalar("train_loss", train_loss, step=self.global_episode) self.collect_metrics(*args) def collect_metrics(self, X_val: np.ndarray, y_val: np.ndarray, save_best: str = None): """Collects metrics using the trained Q-network. :param X_val: Features of validation data, same shape as X_train :type X_val: np.ndarray :param y_val: Labels of validation data, same shape as y_train :type y_val: np.ndarra :param save_best: Saving the best model of all validation runs based on given metric: Choose one of: {Gmean, F1, Precision, Recall, TP, TN, FP, FN} This improves stability since the model at the last episode is not guaranteed to be the best model. :type save_best: str """ y_pred = network_predictions(self.agent._target_q_network, X_val) print('classification_metrics') stats = classification_metrics(y_val, y_pred) print('Before AVGQ') avgQ = np.mean(decision_function(self.agent._target_q_network, X_val)) # Max action for each x in X print('After AVGQ') if save_best is not None: if not hasattr(self, "best_score"): # If no best model yet self.best_score = 0.0 if stats.get(save_best) >= self.best_score: # Overwrite best model self.save_network() # Saving directly to avoid shallow copy without trained weights self.best_score = stats.get(save_best) with self.writer.as_default(): tf.summary.scalar("AverageQ", avgQ, step=self.global_episode) # Average Q-value for this epoch for k, v in stats.items(): tf.summary.scalar(k, v, step=self.global_episode) def evaluate(self, X_test, y_test): """ Final evaluation of trained Q-network with X_test and y_test. Optional PR and ROC curve comparison to X_train, y_train to ensure no overfitting is taking place. 
:param X_test: Features of test data, same shape as X_train :type X_test: np.ndarray :param y_test: Labels of test data, same shape as y_train :type y_test: np.ndarray :param X_train: Features of train data :type X_train: np.ndarray :param y_train: Labels of train data :type y_train: np.ndarray """ #if hasattr(self, "best_score"): # print(f"\033[92mBest score: {self.best_score:6f}!\033[0m") # network = self.load_network(self.model_path) # Load best saved model #else: # network = self.agent._target_q_network # Load latest target model #network = self.load_network(self.model_path) #if (X_train is not None) and (y_train is not None): # plot_pr_curve(network, X_test, y_test, X_train, y_train) # plot_roc_curve(network, X_test, y_test, X_train, y_train) y_pred = network_predictions(self.agent._target_q_network, X_test) return classification_metrics(y_test, y_pred) def save_network(self): """Saves the target Q-network as a pickle to `model_path`.""" print('save_network') with open(self.model_path, "wb") as f: # Save Q-network as pickle pickle.dump(self.agent._target_q_network, f) def get_network(self): """Returns the agent's trained target Q-network (no file I/O involved). :returns: The target Q-network object used for predictions. :rtype: tf_agents Network """ return self.agent._target_q_network
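A compact end-to-end sketch of TrainDQN on synthetic tabular data, assuming tf-agents is installed and that the package layout matches the imports used inside this file; the hyperparameter values are illustrative, not tuned.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense
from reinforcement.agents.dqn import TrainDQN  # same path DRL_train.py imports from

# Imbalanced binary toy problem; the environment expects float32 features and int 0/1 labels.
X, y = make_classification(n_samples=3000, n_features=8, weights=[0.95, 0.05], random_state=0)
X, y = X.astype(np.float32), y.astype(np.int32)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, stratify=y, random_state=0)

layers = [Dense(64, activation="relu"), Dense(32, activation="relu"), Dense(2, activation=None)]

model = TrainDQN(episodes=200, warmup_steps=1000, learning_rate=1e-3, gamma=0.99,
                 min_epsilon=0.05, decay_episodes=20, batch_size=32,
                 collect_steps_per_episode=50, model_path="./models", log_dir="./logs")
model.compile_model(X_train, y_train, layers)   # builds env, agent and replay buffer
model.train(X_val, y_val)                       # *args are forwarded to collect_metrics(X_val, y_val)
print(model.evaluate(X_val, y_val))             # {'F1': ..., 'Precision': ..., 'Recall': ...}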
ddqn.py
import os import pickle from datetime import datetime import numpy as np import tensorflow as tf from reinforcement.environments.classifierenv import ClassifierEnv from reinforcement.metrics import (classification_metrics, decision_function, network_predictions, plot_pr_curve, plot_roc_curve) from reinforcement.utils import imbalance_ratio from tensorflow import data from tensorflow.keras.optimizers import Adam from tf_agents.agents.dqn.dqn_agent import DdqnAgent from tf_agents.drivers.dynamic_step_driver import DynamicStepDriver from tf_agents.environments.tf_py_environment import TFPyEnvironment from tf_agents.networks.sequential import Sequential from tf_agents.policies.random_tf_policy import RandomTFPolicy from tf_agents.replay_buffers.tf_uniform_replay_buffer import \ TFUniformReplayBuffer from tf_agents.utils import common class TrainDDQN(): """Wrapper for DDQN training, validation, saving etc.""" def __init__(self, episodes: int, warmup_steps: int, learning_rate: float, gamma: float, min_epsilon: float, decay_episodes: int, model_path: str = None, log_dir: str = None, batch_size: int = 64, memory_length: int = None, collect_steps_per_episode: int = 1, val_every: int = None, target_update_period: int = 1, target_update_tau: float = 1.0, progressbar: bool = True, n_step_update: int = 1, gradient_clipping: float = 1.0, collect_every: int = 1) -> None: """ Wrapper to make training easier. Code is partly based of https://www.tensorflow.org/agents/tutorials/1_dqn_tutorial :param episodes: Number of training episodes :type episodes: int :param warmup_steps: Number of episodes to fill Replay Buffer with random state-action pairs before training starts :type warmup_steps: int :param learning_rate: Learning Rate for the Adam Optimizer :type learning_rate: float :param gamma: Discount factor for the Q-values :type gamma: float :param min_epsilon: Lowest and final value for epsilon :type min_epsilon: float :param decay_episodes: Amount of episodes to decay from 1 to `min_epsilon` :type decay_episodes: int :param model_path: Location to save the trained model :type model_path: str :param log_dir: Location to save the logs, usefull for TensorBoard :type log_dir: str :param batch_size: Number of samples in minibatch to train on each step :type batch_size: int :param memory_length: Maximum size of the Replay Buffer :type memory_length: int :param collect_steps_per_episode: Amount of data to collect for Replay Buffer each episiode :type collect_steps_per_episode: int :param collect_every: Step interval to collect data during training :type collect_every: int :param val_every: Validate the model every X episodes using the `collect_metrics()` function :type val_every: int :param target_update_period: Update the target Q-network every X episodes :type target_update_period: int :param target_update_tau: Parameter for softening the `target_update_period` :type target_update_tau: float :param progressbar: Enable or disable the progressbar for collecting data and training :type progressbar: bool :return: None :rtype: NoneType """ self.episodes = episodes # Total episodes self.warmup_steps = warmup_steps # Amount of warmup steps before training self.batch_size = batch_size # Batch size of Replay Memory self.collect_steps_per_episode = collect_steps_per_episode # Amount of steps to collect data each episode self.collect_every = collect_every # Step interval to collect data during training self.learning_rate = learning_rate # Learning Rate self.gamma = gamma # Discount factor self.min_epsilon = min_epsilon # 
Minimal chance of choosing random action self.decay_episodes = decay_episodes # Number of episodes to decay from 1.0 to `EPSILON` self.target_update_period = target_update_period # Period for soft updates self.target_update_tau = target_update_tau self.progressbar = progressbar # Enable or disable the progressbar for collecting data and training self.n_step_update = n_step_update self.gradient_clipping = gradient_clipping # Clip the loss self.compiled = False NOW = "DDQN" #datetime.now().strftime("%Y%m%d_%H%M%S") if memory_length is not None: self.memory_length = memory_length # Max Replay Memory length else: self.memory_length = warmup_steps if val_every is not None: self.val_every = val_every # Validate the policy every `val_every` episodes else: self.val_every = self.episodes // min(50, self.episodes) # Can't validate the model 50 times if self.episodes < 50 if model_path is not None: #if os.path.exists(model_path + "/" + NOW + ".pkl"): # os.remove(model_path + "/" + NOW + ".pkl") self.model_path = model_path + "/" + NOW + ".pkl" else: self.model_path = "./models/" + NOW + ".pkl" if log_dir is None: log_dir = "./logs/" + NOW self.writer = tf.summary.create_file_writer(log_dir) def compile_model(self, X_train, y_train, layers: list = [], imb_ratio: float = None, loss_fn=common.element_wise_squared_loss) -> None: """Initializes the neural networks, DDQN-agent, collect policies and replay buffer. :param X_train: Training data for the model. :type X_train: np.ndarray :param y_train: Labels corresponding to `X_train`. 1 for the positive class, 0 for the negative class. :param y_train: np.ndarray :param layers: List of layers to feed into the TF-agents custom Sequential(!) layer. :type layers: list :param imb_ratio: The imbalance ratio of the data. :type imb_ratio: float :param loss_fn: Callable loss function :type loss_fn: tf.compat.v1.losses :return: None :rtype: NoneType """ if imb_ratio is None: imb_ratio = imbalance_ratio(y_train) self.train_env = TFPyEnvironment(ClassifierEnv(X_train, y_train, imb_ratio)) self.global_episode = tf.Variable(0, name="global_episode", dtype=np.int64, trainable=False) # Global train episode counter # Custom epsilon decay: https://github.com/tensorflow/agents/issues/339 epsilon_decay = tf.compat.v1.train.polynomial_decay( 1.0, self.global_episode, self.decay_episodes, end_learning_rate=self.min_epsilon) self.q_net = Sequential(layers, self.train_env.observation_spec()) self.agent = DdqnAgent(self.train_env.time_step_spec(), self.train_env.action_spec(), q_network=self.q_net, optimizer=Adam(learning_rate=self.learning_rate), td_errors_loss_fn=loss_fn, train_step_counter=self.global_episode, target_update_period=self.target_update_period, target_update_tau=self.target_update_tau, gamma=self.gamma, epsilon_greedy=epsilon_decay, n_step_update=self.n_step_update, gradient_clipping=self.gradient_clipping) self.agent.initialize() self.random_policy = RandomTFPolicy(self.train_env.time_step_spec(), self.train_env.action_spec()) self.replay_buffer = TFUniformReplayBuffer(data_spec=self.agent.collect_data_spec, batch_size=self.train_env.batch_size, max_length=self.memory_length) self.warmup_driver = DynamicStepDriver(self.train_env, self.random_policy, observers=[self.replay_buffer.add_batch], num_steps=self.warmup_steps) # Uses a random policy self.collect_driver = DynamicStepDriver(self.train_env, self.agent.collect_policy, observers=[self.replay_buffer.add_batch], num_steps=self.collect_steps_per_episode) # Uses the epsilon-greedy policy of the agent 
self.agent.train = common.function(self.agent.train) # Optimalization self.warmup_driver.run = common.function(self.warmup_driver.run) self.collect_driver.run = common.function(self.collect_driver.run) self.compiled = True def train(self, *args) -> None: """Starts the training of the model. Includes warmup period, metrics collection and model saving. :param *args: All arguments will be passed to `collect_metrics()`. This can be usefull to pass callables, testing environments or validation data. Overwrite the TrainDDQN.collect_metrics() function to use your own *args. :type *args: Any :return: None :rtype: NoneType, last step is saving the model as a side-effect """ assert self.compiled, "Model must be compiled with model.compile_model(X_train, y_train, layers) before training." # Warmup period, fill memory with random actions if self.progressbar: print(f"\033[92mCollecting data for {self.warmup_steps:_} steps... This might take a few minutes...\033[0m") self.warmup_driver.run(time_step=None, policy_state=self.random_policy.get_initial_state(self.train_env.batch_size)) if self.progressbar: print(f"\033[92m{self.replay_buffer.num_frames():_} frames collected!\033[0m") dataset = self.replay_buffer.as_dataset(sample_batch_size=self.batch_size, num_steps=self.n_step_update + 1, num_parallel_calls=data.experimental.AUTOTUNE).prefetch(data.experimental.AUTOTUNE) iterator = iter(dataset) def _train(): experiences, _ = next(iterator) return self.agent.train(experiences).loss _train = common.function(_train) # Optimalization ts = None policy_state = self.agent.collect_policy.get_initial_state(self.train_env.batch_size) self.collect_metrics(*args) # Initial collection for step 0 for _ in range(self.episodes): if not self.global_episode % self.collect_every: # Collect a few steps using collect_policy and save to `replay_buffer` if self.collect_steps_per_episode != 0: ts, policy_state = self.collect_driver.run(time_step=ts, policy_state=policy_state) # Sample a batch of data from `replay_buffer` and update the agent's network train_loss = _train() if not self.global_episode % self.val_every: with self.writer.as_default(): tf.summary.scalar("train_loss", train_loss, step=self.global_episode) self.collect_metrics(*args) def collect_metrics(self, X_val: np.ndarray, y_val: np.ndarray, save_best: str = None): """Collects metrics using the trained Q-network. :param X_val: Features of validation data, same shape as X_train :type X_val: np.ndarray :param y_val: Labels of validation data, same shape as y_train :type y_val: np.ndarray :param save_best: Saving the best model of all validation runs based on given metric: Choose one of: {Gmean, F1, Precision, Recall, TP, TN, FP, FN} This improves stability since the model at the last episode is not guaranteed to be the best model. 
:type save_best: str """ y_pred = network_predictions(self.agent._target_q_network, X_val) stats = classification_metrics(y_val, y_pred) avgQ = np.mean(decision_function(self.agent._target_q_network, X_val)) # Max action for each x in X if save_best is not None: if not hasattr(self, "best_score"): # If no best model yet self.best_score = 0.0 if stats.get(save_best) >= self.best_score: # Overwrite best model self.save_network() # Saving directly to avoid shallow copy without trained weights self.best_score = stats.get(save_best) with self.writer.as_default(): tf.summary.scalar("AverageQ", avgQ, step=self.global_episode) # Average Q-value for this epoch for k, v in stats.items(): tf.summary.scalar(k, v, step=self.global_episode) def evaluate(self, X_train, y_train, X_test, y_test): """ Final evaluation of trained Q-network with X_test and y_test. Optional PR and ROC curve comparison to X_train, y_train to ensure no overfitting is taking place. :param X_test: Features of test data, same shape as X_train :type X_test: np.ndarray :param y_test: Labels of test data, same shape as y_train :type y_test: np.ndarray :param X_train: Features of train data :type X_train: np.ndarray :param y_train: Labels of train data :type y_train: np.ndarray """ #if hasattr(self, "best_score"): # print(f"\033[92mBest score: {self.best_score:6f}!\033[0m") # network = self.load_network(self.model_path) # Load best saved model #else: # network = self.agent._target_q_network # Load latest target model #network = self.load_network(self.model_path) #if (X_train is not None) and (y_train is not None): # plot_pr_curve(network, X_test, y_test, X_train, y_train) # plot_roc_curve(network, X_test, y_test, X_train, y_train) y_pred = network_predictions(self.agent._target_q_network, X_test) return classification_metrics(y_test, y_pred) def get_network(self): """Returns the agent's trained target Q-network (no file I/O involved).""" #network = self.load_network(self.model_path) return self.agent._target_q_network def save_network(self, filename_rl=None): """Saves the target Q-network as a pickle to `filename_rl`, falling back to `model_path`.""" save_path = filename_rl if filename_rl is not None else self.model_path with open(save_path, "wb") as f: # Save Q-network as pickle pickle.dump(self.agent._target_q_network, f) @staticmethod def load_network(fp: str): """Static method to load Q-network pickle from given filepath. :param fp: Filepath to the saved pickle of the network :type fp: str :returns: The network-object loaded from a pickle file. :rtype: tensorflow.keras.models.Model """ with open(fp, "rb") as f: # Load the Q-network network = pickle.load(f) return network
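TrainDDQN is driven the same way as TrainDQN above; the differences worth noting are the DdqnAgent it wraps, the four-argument evaluate(X_train, y_train, X_test, y_test), and the pickle save/load helpers. A short sketch, with the same assumptions about installed packages and import paths as the DQN example, and using the keyword form of save_network shown above.

import os
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense
from reinforcement.agents.ddqn import TrainDDQN  # same path DRL_train.py imports from

X, y = make_classification(n_samples=3000, n_features=8, weights=[0.95, 0.05], random_state=0)
X, y = X.astype(np.float32), y.astype(np.int32)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, stratify=y, random_state=0)

layers = [Dense(64, activation="relu"), Dense(32, activation="relu"), Dense(2, activation=None)]

model = TrainDDQN(episodes=200, warmup_steps=1000, learning_rate=1e-3, gamma=0.99,
                  min_epsilon=0.05, decay_episodes=20, batch_size=32,
                  collect_steps_per_episode=50, log_dir="./logs")
model.compile_model(X_train, y_train, layers)
model.train(X_val, y_val)
print(model.evaluate(X_train, y_train, X_val, y_val))   # note the 4-argument signature here

os.makedirs("./models", exist_ok=True)
model.save_network("./models/ddqn_qnet.pkl")            # explicit target path for the pickled Q-network
q_net = TrainDDQN.load_network("./models/ddqn_qnet.pkl")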
classifierenv.py
import numpy as np from tf_agents.environments.py_environment import PyEnvironment from tf_agents.specs.array_spec import ArraySpec, BoundedArraySpec from tf_agents.trajectories import time_step as ts class ClassifierEnv(PyEnvironment): """ Custom `PyEnvironment` environment for imbalanced classification. Based on https://www.tensorflow.org/agents/tutorials/2_environments_tutorial """ def __init__(self, X_train: np.ndarray, y_train: np.ndarray, imb_ratio: float): """Initialization of environment with X_train and y_train. :param X_train: Features shaped: [samples, ..., ] :type X_train: np.ndarray :param y_train: Labels shaped: [samples] :type y_train: np.ndarray :param imb_ratio: Imbalance ratio of the data :type imb_ratio: float :returns: None :rtype: NoneType """ #print('1') self._action_spec = BoundedArraySpec(shape=(), dtype=np.int32, minimum=0, maximum=(len(np.unique(y_train)) - 1), name="action") #print(y_train) self._observation_spec = ArraySpec(shape=X_train.shape[1:], dtype=X_train.dtype, name="observation") #print('3') self._episode_ended = False self.X_train = X_train self.y_train = y_train self.imb_ratio = imb_ratio # Imbalance ratio: 0 < imb_ratio < 1 self.id = np.arange(self.X_train.shape[0]) # List of IDs to connect X and y data self.episode_step = 0 # Episode step, resets every episode self._state = self.X_train[self.id[self.episode_step]] def action_spec(self): """ Definition of the discrete actionspace. 1 for the positive/minority class, 0 for the negative/majority class. """ return self._action_spec def observation_spec(self): """Definition of the continous statespace e.g. the observations in typical RL environments.""" return self._observation_spec def _reset(self): """Shuffles data and returns the first state of the shuffled data to begin training on new episode.""" np.random.shuffle(self.id) # Shuffle the X and y data self.episode_step = 0 # Reset episode step counter at the end of every episode self._state = self.X_train[self.id[self.episode_step]] self._episode_ended = False # Reset terminal condition return ts.restart(self._state) def _step(self, action: int): """ Take one step in the environment. If the action is correct, the environment will either return 1 or `imb_ratio` depending on the current class. If the action is incorrect, the environment will either return -1 or -`imb_ratio` depending on the current class. """ if self._episode_ended: # The last action ended the episode. Ignore the current action and start a new episode return self.reset() env_action = self.y_train[self.id[self.episode_step]] # The label of the current state self.episode_step += 1 if action == env_action: # Correct action if env_action: # Minority reward = 1 # True Positive else: # Majority reward = self.imb_ratio # True Negative else: # Incorrect action if env_action: # Minority reward = -1 # False Negative self._episode_ended = True # Stop episode when minority class is misclassified else: # Majority reward = -self.imb_ratio # False Positive if self.episode_step == self.X_train.shape[0] - 1: # If last step in data self._episode_ended = True self._state = self.X_train[self.id[self.episode_step]] # Update state with new datapoint if self._episode_ended: return ts.termination(self._state, reward) else: return ts.transition(self._state, reward)
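A standalone sanity check for ClassifierEnv above: validate it against the tf-agents environment contract, step it once by hand, and wrap it for use with the agents. The import path of ClassifierEnv is the one used by dqn.py/ddqn.py; the synthetic data is arbitrary.

import numpy as np
from tf_agents.environments import tf_py_environment, utils
from reinforcement.environments.classifierenv import ClassifierEnv  # path used by dqn.py / ddqn.py

# 100 samples with 5 features; the first 10 rows form the minority class (label 1).
X = np.random.rand(100, 5).astype(np.float32)
y = np.zeros(100, dtype=np.int32)
y[:10] = 1

env = ClassifierEnv(X, y, imb_ratio=0.1)
utils.validate_py_environment(env, episodes=3)   # spec/transition consistency check

time_step = env.reset()
time_step = env.step(np.int32(1))                # predict "minority" for the first (shuffled) sample
print(time_step.reward)                          # 1.0 for a true positive, -0.1 for a false positive

tf_env = tf_py_environment.TFPyEnvironment(env)  # batched wrapper consumed by DqnAgent / DdqnAgent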
__init__.py
null
aionMlopsService.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' # -*- coding: utf-8 -*- # -*- coding: utf-8 -*- import logging logging.getLogger('tensorflow').disabled = True import json import mlflow import mlflow.sklearn import mlflow.sagemaker as mfs # from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split # from sklearn import datasets import time import numpy as np # Load dataset # from sklearn.datasets import load_iris import pickle # Load the pickled model # from matplotlib import pyplot import sys import os import boto3 import subprocess import os.path from os.path import expanduser import platform from pathlib import Path class aionMlopsService: def __init__(self,model,mlflowtosagemakerDeploy,mlflowtosagemakerPushOnly,mlflowtosagemakerPushImageName,mlflowtosagemakerdeployModeluri,experiment_name,mlflow_modelname,awsaccesskey_id,awssecretaccess_key,aws_session_token,mlflow_container_name,aws_region,aws_id,iam_sagemakerfullaccess_arn,sm_app_name,sm_deploy_option,delete_ecr_repository,ecrRepositoryName): try: self.model=model self.mlflowtosagemakerDeploy=mlflowtosagemakerDeploy self.mlflowtosagemakerPushOnly=str(mlflowtosagemakerPushOnly) self.mlflowtosagemakerPushImageName=str(mlflowtosagemakerPushImageName) self.mlflowtosagemakerdeployModeluri=str(mlflowtosagemakerdeployModeluri) self.experiment_name=experiment_name self.mlflow_modelname=mlflow_modelname self.awsaccesskey_id=awsaccesskey_id self.awssecretaccess_key=awssecretaccess_key self.aws_session_token=aws_session_token self.mlflow_container_name=mlflow_container_name self.aws_region=aws_region self.aws_id=aws_id self.iam_sagemakerfullaccess_arn=iam_sagemakerfullaccess_arn self.sm_app_name=sm_app_name self.sm_deploy_option=sm_deploy_option self.delete_ecr_repository=delete_ecr_repository self.ecrRepositoryName=ecrRepositoryName from appbe.dataPath import LOG_LOCATION sagemakerLogLocation = LOG_LOCATION try: os.makedirs(sagemakerLogLocation) except OSError as e: if (os.path.exists(sagemakerLogLocation)): pass else: raise OSError('sagemakerLogLocation error.') self.sagemakerLogLocation=str(sagemakerLogLocation) filename_mlops = 'mlopslog_'+str(int(time.time())) filename_mlops=filename_mlops+'.log' # filename = 'mlopsLog_'+Time() filepath = os.path.join(self.sagemakerLogLocation, filename_mlops) logging.basicConfig(filename=filepath, format='%(message)s',filemode='w') # logging.basicConfig(filename="uq_logging.log", format='%(asctime)s %(message)s',filemode='w') # logging.basicConfig(filename="uq_logging.log", format=' %(message)s',filemode='w') # logging.basicConfig(filename='uq_logging.log', encoding='utf-8', level=logging.INFO) self.log = logging.getLogger('aionMLOps') self.log.setLevel(logging.DEBUG) # mlflow.set_experiment(self.experiment_name) except Exception as e: self.log.info('<!------------- mlflow model INIT Error ---------------> '+str(e)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) 
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def mlflowSetPath(self,path): track_dir=os.path.join(path,'mlruns') uri="file:"+str(Path(track_dir)) return uri #Currently not used this delete ecr repository option def ecr_repository_delete(self,rep_name): # import subprocess client = boto3.client('ecr') repositories = client.describe_repositories() ecr_delete_rep=client.delete_repository(registryId=self.aws_id,repositoryName=self.ecrRepositoryName,force=True) mlflow_ecr_delete=subprocess.run(['aws', 'ecr', 'delete-repository','--repository-name',rep_name,'||','true']) self.log.info('Success: deleted aws ecr repository which contains mlops image.') def check_sm_deploy_status(self,app_name): sage_client = boto3.client('sagemaker', region_name=self.aws_region) endpoint_description = sage_client.describe_endpoint(EndpointName=app_name) endpoint_status = endpoint_description["EndpointStatus"] try: failure_reason=endpoint_description["FailureReason"] self.log.info("sagemaker end point creation failure reason is: "+str(failure_reason)) except: pass endpoint_status=str(endpoint_status) return endpoint_status def invoke_sm_endpoint(self,app_name, input_json): client = boto3.session.Session().client("sagemaker-runtime", self.aws_region) response = client.invoke_endpoint( EndpointName=app_name, Body=input_json, ContentType='application/json; format=pandas-split', ) # preds = response['Body'].read().decode("ascii") preds = response['Body'].read().decode("ascii") preds = json.loads(preds) # print("preds: {}".format(preds)) return preds def predict_sm_app_endpoint(self,X_test): #print(X_test) import pandas as pd prediction=None AWS_ACCESS_KEY_ID=str(self.awsaccesskey_id) AWS_SECRET_ACCESS_KEY=str(self.awssecretaccess_key) AWS_SESSION_TOKEN=str(self.aws_session_token) region = str(self.aws_region) #Existing model deploy options # mlflowtosagemakerPushImageName=str(self.mlflowtosagemakerPushImageName) # mlflowtosagemakerdeployModeluri=str(self.mlflowtosagemakerdeployModeluri) try: import subprocess cmd = 'aws configure set region_name '+region os.system(cmd) cmd = 'aws configure set aws_access_key_id '+AWS_ACCESS_KEY_ID os.system(cmd) cmd = 'aws configure set aws_secret_access_key '+AWS_SECRET_ACCESS_KEY os.system(cmd) ''' aws_region=subprocess.run(['aws', 'configure', 'set','region_name',region]) aws_accesskeyid=subprocess.run(['aws', 'configure', 'set','aws_access_key_id',AWS_ACCESS_KEY_ID]) aws_secretaccesskey=subprocess.run(['aws', 'configure', 'set','aws_secret_access_key',AWS_SECRET_ACCESS_KEY]) ''' except: pass #Create a session for aws communication using aws boto3 lib # s3_client = boto3.client('ecr',aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region) # s3 = boto3.resource('ecr', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key= AWS_SECRET_ACCESS_KEY) session = boto3.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region) #X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=2) # query_input = pd.DataFrame(X_test).iloc[[1,5]].to_json(orient="split") try: query_input = pd.DataFrame(X_test).to_json(orient="split") #print(query_input) prediction = self.invoke_sm_endpoint(app_name=self.sm_app_name, input_json=query_input) # self.log.info("sagemaker end point Prediction: \n"+str(prediction)) except Exception as e: print(e) return prediction def 
deleteSagemakerApp(self,app_name,region): # import mlflow.sagemaker as mfs # region = 'ap-south-1' # app_name = 'aion-demo-app' mfs.delete(app_name=app_name,region_name=region, archive=False,synchronous=True, timeout_seconds=300) # print("AION mlops sagemaker application endpoint is deleted....\n") self.log.info('AION mlops sagemaker application endpoint is deleted, application name is: '+str(app_name)) def deployModel2sagemaker(self,mlflow_container_name,tag_id,model_path): region = str(self.aws_region) aws_id = str(self.aws_id) iam_sagemakerfullaccess_arn = str(self.iam_sagemakerfullaccess_arn) app_name = str(self.sm_app_name) model_uri = str(model_path) app_status=False mlflow_root_dir = None try: os.chdir(str(self.sagemakerLogLocation)) mlflow_root_dir = os.getcwd() self.log.info('mlflow root dir: '+str(mlflow_root_dir)) except: self.log.info("path issue.") try: c_status=self.check_sm_deploy_status(app_name) #if ((c_status == "Failed") or (c_status == "OutOfService")): if ((c_status == "Failed") or (c_status.lower() == "failed")): app_status=False self.log.info("Sagemaker endpoint status: Failed.\n") mfs.delete(app_name=app_name,region_name=region, archive=False,synchronous=True, timeout_seconds=300) elif ((c_status.lower() == "inservice") or (c_status == "InService")): app_status=True self.log.info("Sagemaker endpoint status: InService. Running sagemaker endpoint name: \n"+str(app_name)) else: app_status=False pass except: # print("deploy status error.\n") pass #aws ecr model app_name should contain only [[a-zA-Z0-9-]] import re if app_name: pattern = re.compile("[A-Za-z0-9-]+") # if found match (entire string matches pattern) if pattern.fullmatch(app_name) is not None: #print("Found match: ") pass else: app_name = 'aion-demo-app' else: app_name = 'aion-demo-app' mlflow_image=mlflow_container_name+':'+tag_id image_url = aws_id + '.dkr.ecr.' + region + '.amazonaws.com/' + mlflow_image deploy_option="create" self.log.info('deploy_option: \n'+str(deploy_option)) if (deploy_option.lower() == "create"): # Other deploy modes: mlflow.sagemaker.DEPLOYMENT_MODE_ADD,mlflow.sagemaker.DEPLOYMENT_MODE_REPLACE if not (app_status): try: mfs.deploy(app_name=app_name,model_uri=model_uri,region_name=region,mode="create",execution_role_arn=iam_sagemakerfullaccess_arn,image_url=image_url) self.log.info('sagemaker endpoint created and model deployed. Application name is: \n'+str(app_name)) except: self.log.info('Creating end point application issue.Please check the connection and aws credentials \n') else: self.log.info('Sagemaker application with user endpoint name already running.Please check. Please delete the old endpoint with same name.\n') elif (deploy_option.lower() == "delete"): # import mlflow.sagemaker as mfs # # region = 'ap-south-1' # # app_name = 'aion-demo-app' # mfs.delete(app_name=app_name,region_name=region, archive=False,synchronous=True, timeout_seconds=300) # print("Mlflow sagemaker application endpoint is deleted....\n") # self.log.info('Mlflow sagemaker application endpoint is deleted, application name is: '+str(app_name)) pass elif (deploy_option.lower() == "add"): pass elif (deploy_option.lower() == "replace"): pass else: pass return app_status def mlflow2sagemaker_deploy(self): self.log.info('<!------------- Inside AION mlops to sagemaker communication and deploy process. 
---------------> ') deploy_status=False app_name = str(self.sm_app_name) self.log.info('Sagemaker Application Name: '+str(app_name)) uri_mlflow=self.mlflowSetPath(self.sagemakerLogLocation) mlflow.set_tracking_uri(uri_mlflow) mlops_trackuri=mlflow.get_tracking_uri() mlops_trackuri=str(mlops_trackuri) self.log.info('mlops tracking uri: '+str(mlops_trackuri)) localhost_deploy=False try: #Loading aion model to deploy in sagemaker mlflow.set_experiment(self.experiment_name) self.log.info('Endpoint Name: '+str(self.experiment_name)) # Assume, the model already loaded from joblib in aionmlflow2smInterface.py file. aionmodel2deploy=self.model # run_id = None # experiment_id=None # Use the loaded pickled model to make predictions # pred = knn_from_pickle.predict(X_test) with mlflow.start_run(run_name='AIONMLOps') as run: # aionmodel2deploy.fit(X_train, y_train) # predictions = aionmodel2deploy.predict(X_test) mlflow.sklearn.log_model(aionmodel2deploy, self.mlflow_modelname) run_id = run.info.run_uuid experiment_id = run.info.experiment_id self.log.info('AION mlops experiment run_id: '+str(run_id)) self.log.info('AION mlops experiment experiment_id: '+str(experiment_id)) self.log.info('AION mlops experiment model_name: '+str(self.mlflow_modelname)) artifact_uri = {mlflow.get_artifact_uri()} # print("1.artifact_uri: \n",artifact_uri) mlflow.end_run() #If we need, we can check the mlflow experiments. # try: # mlflow_client = mlflow.tracking.MlflowClient('./mlruns') # exp_list = mlflow_client.list_experiments() # except: # pass #print("mlflow exp_list: \n",exp_list) mlflow_modelname=str(self.mlflow_modelname) mlops_trackuri=mlops_trackuri.replace('file:','') mlops_trackuri=str(mlops_trackuri) # mlflow_root_dir = os.getcwd() mlflow_root_dir = None try: os.chdir(str(self.sagemakerLogLocation)) mlflow_root_dir = os.getcwd() self.log.info('mlflow root dir: '+str(mlflow_root_dir)) except: self.log.info("path issue.") model_path = 'mlruns/%s/%s/artifacts/%s' % (experiment_id, run_id,self.mlflow_modelname) # model_path=mlops_trackuri+'\\%s\\%s\\artifacts\\%s' % (experiment_id, run_id,mlflow_modelname) self.log.info("local host aion mlops model_path is: "+str(model_path)) time.sleep(2) #print("Environment variable setup in the current working dir for aws sagemaker cli connection... \n") self.log.info('Environment variable setup in the current working dir for aws sagemaker cli connection... 
\n ') AWS_ACCESS_KEY_ID=str(self.awsaccesskey_id) AWS_SECRET_ACCESS_KEY=str(self.awssecretaccess_key) AWS_SESSION_TOKEN=str(self.aws_session_token) region = str(self.aws_region) #Existing model deploy options mlflowtosagemakerPushImageName=str(self.mlflowtosagemakerPushImageName) mlflowtosagemakerdeployModeluri=str(self.mlflowtosagemakerdeployModeluri) import subprocess cmd = 'aws configure set region_name '+region os.system(cmd) cmd = 'aws configure set aws_access_key_id '+AWS_ACCESS_KEY_ID os.system(cmd) cmd = 'aws configure set aws_secret_access_key '+AWS_SECRET_ACCESS_KEY os.system(cmd) #Create a session for aws communication using aws boto3 lib # s3_client = boto3.client('ecr',aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region) # s3 = boto3.resource('ecr', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key= AWS_SECRET_ACCESS_KEY) session = boto3.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region) # session = boto3.session.Session( # aws_access_key_id=AWS_ACCESS_KEY_ID, # aws_secret_access_key=AWS_SECRET_ACCESS_KEY, # aws_session_token=AWS_SESSION_TOKEN # ) # awsclient = session.resource('ecr') # s3 = session.resource('s3') self.log.info('aws environment variable setup done... \n') try: os.chdir(mlflow_root_dir) except FileNotFoundError: self.log.info('Directory does not exist. '+str(mlflow_root_dir)) except NotADirectoryError: self.log.info('model_path is not a directory. '+str(mlflow_root_dir)) except PermissionError: self.log.info('Issue in permissions to change to model dir. '+str(mlflow_root_dir)) mlflow_container_name=str(self.mlflow_container_name) mlflow_version=mlflow.__version__ tag_id=mlflow_version if (self.mlflowtosagemakerPushOnly.lower() == "true"): self.log.info('Selected option is <Deploy existing model to sagemaker> \n') aws_id=str(self.aws_id) arn=str(self.iam_sagemakerfullaccess_arn) mlflow_image=mlflow_container_name+':'+tag_id image_url = aws_id+'.dkr.ecr.'+region+'.amazonaws.com/'+mlflow_image # print("image_url:========= \n",image_url) deploy_status=True try: model_path=mlflowtosagemakerdeployModeluri # ##We need to run mlflow docker container command in the artifacts->model directory inside mlruns. self.log.info('Deploy existing model container-Model path given by user: '+str(model_path)) try: os.chdir(model_path) except FileNotFoundError: self.log.info('Directory does not exist. '+str(model_path)) except NotADirectoryError: self.log.info('model_path is not a directory. '+str(model_path)) except PermissionError: self.log.info('Issue in permissions to change to model dir. '+str(model_path)) try: mfs.push_image_to_ecr(image=mlflowtosagemakerPushImageName) deploy_status=True self.log.info('AION mlops pushed the docker container to aws ecr. \n ') except: self.log.info("error in pushing existing container to ecr.\n") deploy_status=False time.sleep(2) #Now,change the working dir to root dir,because now deploy needs full mlruns to model name dir. try: # print(" Changing directory to mlflow root dir....\n") os.chdir(mlflow_root_dir) except FileNotFoundError: self.log.info('model path is not a directory. '+str(mlflow_root_dir)) except NotADirectoryError: self.log.info('model path is not a directory. '+str(mlflow_root_dir)) # print("{0} is not a directory".format(mlflow_root_dir)) except PermissionError: self.log.info('Issue in permissions to change to model dir. 
'+str(mlflow_root_dir)) # self.deployModel2sagemaker(mlflowtosagemakerPushImageName,tag_id,mlflowtosagemakerdeployModeluri) try: if (deploy_status): self.deployModel2sagemaker(mlflowtosagemakerPushImageName,tag_id,mlflowtosagemakerdeployModeluri) self.log.info('AION creates docker container and push the container into aws ecr.. ') time.sleep(2) except: self.log.info('AION deploy error.check connection and aws config parameters. ') deploy_status=False # self.log.info('model deployed in sagemaker. ') except Exception as e: self.log.info('AION mlops failed to push docker container in aws ecr, check configuration parameters. \n'+str(e)) elif (self.mlflowtosagemakerPushOnly.lower() == "false"): if (self.mlflowtosagemakerDeploy.lower() == "true"): self.log.info('Selected option is <Create and Deploy model> \n') deploy_status=True try: # ##We need to run mlflow docker container command in the artifacts->model directory inside mlruns. try: os.chdir(model_path) except FileNotFoundError: self.log.info('Directory does not exist. '+str(model_path)) except NotADirectoryError: self.log.info('model_path is not a directory. '+str(model_path)) except PermissionError: self.log.info('Issue in permissions to change to model dir. '+str(model_path)) try: mlflow_container_push=subprocess.run(['mlflow', 'sagemaker', 'build-and-push-container','--build','--push','--container',mlflow_container_name]) self.log.info('AION mlops creates docker container and push the container into aws ecr.. ') deploy_status=True time.sleep(2) except: self.log.info('error in pushing aion model container to sagemaker, please check the connection between local host to aws server.') deploy_status=False self.log.info('Now deploying the model container to sagemaker starts....\n ') # Once docker push completes, again going back to mlflow parent dir for deployment #Now,change the working dir to root dir,because now deploy needs full mlruns to model name dir. try: os.chdir(mlflow_root_dir) except FileNotFoundError: self.log.info('model_path does not exist. '+str(mlflow_root_dir)) except NotADirectoryError: self.log.info('model_path is not a directory. '+str(mlflow_root_dir)) except PermissionError: self.log.info('Issue in permissions to change to model dir. '+str(mlflow_root_dir)) # app_name = str(self.sm_app_name) try: if (deploy_status): self.deployModel2sagemaker(mlflow_container_name,tag_id,model_path) except: self.log.info('mlops deploy error.check connection') deploy_status=False except Exception as e: exc = {"status":"FAIL","message":str(e).strip('"')} out_exc = json.dumps(exc) self.log.info('mlflow failed to creates docker container please check the aws iam,ecr permission setup, aws id access_key,secret key values for aion.\n') elif(self.mlflowtosagemakerDeploy.lower() == "false"): deploy_status=False localhost_deploy=True self.log.info('Selected option is <Create AION mlops container in local host .> \n') self.log.info("User selected create-Deploy sagemaker option as False,") self.log.info("Creates the AION mlops-sagemaker container locally starting,but doesn't push into aws ecr and deploy in sagemaker. Check the container in docker repository. ") try: # ##We need to run AION mlops docker container command in the artifacts->model directory inside mlruns. try: os.chdir(model_path) self.log.info('After change to AION mlops model dir, cwd: '+str(model_path)) except FileNotFoundError: self.log.info('Directory does not exist. '+str(model_path)) except NotADirectoryError: self.log.info('model_path is not a directory. 
'+str(model_path)) except PermissionError: self.log.info('Issue in permissions to change to model dir. '+str(model_path)) # mlflow_container_local=subprocess.run(['AION mlops', 'sagemaker', 'build-and-push-container','--build','--no-push','--container',mlflow_container_name]) try: if not (deploy_status): mlflow_container_local=subprocess.run(['mlflow', 'sagemaker', 'build-and-push-container','--build','--no-push','--container',mlflow_container_name]) self.log.info('AION creates local host bsed docker container and push the container local docker repository. Check with <docker images> command.\n ') localhost_deploy=True time.sleep(2) except: self.log.info('error in pushing aion model container to sagemaker, please check the connection between local host to aws server.') deploy_status=False localhost_deploy=False # print("AION mlops creates docker container and push the container into aws ecr.\n") self.log.info('AION mlops creates docker container and stored locally... ') time.sleep(2) except Exception as e: localhost_deploy=False # print("mlflow failed to creates docker container please check the aws iam,ecr permission setup, aws id access_key,secret key values for aion.\n") self.log.info('AION mlops failed to creates docker container in local machine.\n'+str(e)) else: self.log.info('Deploy option not selected, Please check. ') localhost_deploy=False deploy_status=False else: pass localhost_container_status="Notdeployed" mlflow2sm_deploy_status="Notdeployed" if localhost_deploy: localhost_container_status="success" mlflow2sm_deploy_status="Notdeployed" # print("AION creates local docker container successfully.Please check in docker repository.") self.log.info("AION creates local docker container successfully.Please check in docker repository.") # else: # localhost_container_status="failed" # # print("AION failed to create local docker container successfully.Please check in docker repository.") # self.log.info("AION failed to create local docker container successfully.Please check in docker repository.") if (deploy_status): # Finally checking whether mlops model is deployed to sagemaker or not. app_name = str(self.sm_app_name) deploy_s = self.check_sm_deploy_status(app_name) if (deploy_s == "InService"): # print("AION mlops model is deployed at aws sagemaker, use application name(app_name) and region to access.\n") self.log.info('AION mlops model is deployed at aws sagemaker, use application name(app_name) and region to access.\n'+str(app_name)) mlflow2sm_deploy_status="success" localhost_container_status="Notdeployed" else: # print("AION Mlflow model not able to deploy at aws sagemaker\n") self.log.info('AION mlops model not able to deploy at aws sagemaker.\n') mlflow2sm_deploy_status="failed" localhost_container_status="Notdeployed" # else: # mlflow2sm_deploy_status="None" return mlflow2sm_deploy_status,localhost_container_status except Exception as inst: exc = {"status":"FAIL","message":str(inst).strip('"')} out_exc = json.dumps(exc)
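A minimal, hypothetical driver for the aionMlopsService class above, assuming a joblib-pickled scikit-learn model and placeholder AWS credentials/ARNs; the import path and every credential, ARN and image name shown here are assumptions, not values taken from this repository.

import joblib
from aionMlopsService import aionMlopsService          # assumed import path

model = joblib.load('model.sav')                       # any pickled sklearn estimator
svc = aionMlopsService(
    model=model,
    mlflowtosagemakerDeploy='True',                    # build image, push to ECR and create the endpoint
    mlflowtosagemakerPushOnly='False',                 # 'True' would only push an already-built image
    mlflowtosagemakerPushImageName='mlflow-pyfunc:2.0.0',
    mlflowtosagemakerdeployModeluri='mlruns/<experiment_id>/<run_id>/artifacts/model',
    experiment_name='aion-mlops-demo',
    mlflow_modelname='aion_model',
    awsaccesskey_id='<ACCESS_KEY_ID>',
    awssecretaccess_key='<SECRET_ACCESS_KEY>',
    aws_session_token='',
    mlflow_container_name='mlflow-pyfunc',
    aws_region='ap-south-1',
    aws_id='<AWS_ACCOUNT_ID>',
    iam_sagemakerfullaccess_arn='arn:aws:iam::<AWS_ACCOUNT_ID>:role/<SageMakerExecutionRole>',
    sm_app_name='aion-demo-app',
    sm_deploy_option='create',
    delete_ecr_repository='False',
    ecrRepositoryName='mlflow-pyfunc')
deploy_status, localhost_status = svc.mlflow2sagemaker_deploy()
print(deploy_status, localhost_status)                 # e.g. 'success'/'failed'/'Notdeployed' pair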
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
baseline.py
import joblib import pandas as pd import sys import math import time import pandas as pd import numpy as np from sklearn.metrics import confusion_matrix from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score from sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, confusion_matrix from sklearn.svm import SVC from sklearn.linear_model import LinearRegression import argparse import json def mltesting(modelfile,datafile,features,target): model = joblib.load(modelfile) ProblemName = model.__class__.__name__ if ProblemName in ['LogisticRegression','SGDClassifier','SVC','DecissionTreeClassifier','RandomForestClassifier','GaussianNB','KNeighborsClassifier','DecisionTreeClassifier','GradientBoostingClassifier','XGBClassifier','LGBMClassifier','CatBoostClassifier']: Problemtype = 'Classification' elif ProblemName in ['LinearRegression','Lasso','Ridge','DecisionTreeRegressor','RandomForestRegressor','GradientBoostingRegressor','XGBRegressor','LGBMRegressor','CatBoostRegressor']: Problemtype = 'Regression' else: Problemtype = 'Unknown' if Problemtype == 'Classification': Params = model.get_params() try: df = pd.read_csv(datafile,encoding='utf-8',skipinitialspace = True) if ProblemName == 'LogisticRegression' or ProblemName == 'DecisionTreeClassifier' or ProblemName == 'RandomForestClassifier' or ProblemName == 'GaussianNB' or ProblemName == 'KNeighborsClassifier' or ProblemName == 'GradientBoostingClassifier' or ProblemName == 'SVC': features = model.feature_names_in_ elif ProblemName == 'XGBClassifier': features = model.get_booster().feature_names elif ProblemName == 'LGBMClassifier': features = model.feature_name_ elif ProblemName == 'CatBoostClassifier': features = model.feature_names_ modelfeatures = features dfp = df[modelfeatures] tar = target target = df[tar] predic = model.predict(dfp) output = {} matrixconfusion = pd.DataFrame(confusion_matrix(predic,target)) matrixconfusion = matrixconfusion.to_json(orient='index') classificationreport = pd.DataFrame(classification_report(target,predic,output_dict=True)).transpose() classificationreport = round(classificationreport,2) classificationreport = classificationreport.to_json(orient='index') output["Precision"] = "%.2f" % precision_score(target, predic,average='weighted') output["Recall"] = "%.2f" % recall_score(target, predic,average='weighted') output["Accuracy"] = "%.2f" % accuracy_score(target, predic) output["ProblemName"] = ProblemName output["Status"] = "Success" output["Params"] = Params output["Problemtype"] = Problemtype output["Confusionmatrix"] = matrixconfusion output["classificationreport"] = classificationreport # import statistics # timearray = [] # for i in range(0,5): # start = time.time() # predic1 = model.predict(dfp.head(1)) # end = time.time() # timetaken = (round((end - start) * 1000,2),'Seconds') # timearray.append(timetaken) # print(timearray) start = time.time() for i in range(0,5): predic1 = model.predict(dfp.head(1)) end = time.time() timetaken = (round((end - start) * 1000,2),'Seconds') # print(timetaken) start1 = time.time() for i in range(0,5): predic2 = model.predict(dfp.head(10)) end1 = time.time() timetaken1 = (round((end1 - start1) * 1000,2) ,'Seconds') # print(timetaken1) start2 = time.time() for i in range(0,5): predic3 = model.predict(dfp.head(100)) end2 = time.time() timetaken2 = (round((end2 - start2) * 
1000,2) ,'Seconds') # print(timetaken2) output["onerecord"] = timetaken output["tenrecords"] = timetaken1 output["hundrecords"] = timetaken2 print(json.dumps(output)) except Exception as e: output = {} output['Problemtype']='Classification' output['Status']= "Fail" output["ProblemName"] = ProblemName output["Msg"] = 'Detected Model : {} \\n Problem Type : Classification \\n Error : {}'.format(ProblemName, str(e).replace('"','//"').replace('\n', '\\n')) print(output["Msg"]) print(json.dumps(output)) elif Problemtype == 'Regression': Params = model.get_params() try: df = pd.read_csv(datafile,encoding='utf-8',skipinitialspace = True) if ProblemName == 'LinearRegression' or ProblemName == 'Lasso' or ProblemName == 'Ridge' or ProblemName == 'DecisionTreeRegressor' or ProblemName == 'RandomForestRegressor' or ProblemName == 'GaussianNB' or ProblemName == 'KNeighborsRegressor' or ProblemName == 'GradientBoostingRegressor': features = model.feature_names_in_ elif ProblemName == 'XGBRegressor': features = model.get_booster().feature_names elif ProblemName == 'LGBMRegressor': features = model.feature_name_ elif ProblemName == 'CatBoostRegressor': features = model.feature_names_ modelfeatures = features dfp = df[modelfeatures] tar = target target = df[tar] predict = model.predict(dfp) mse = mean_squared_error(target, predict) mae = mean_absolute_error(target, predict) rmse = math.sqrt(mse) r2 = r2_score(target,predict,multioutput='variance_weighted') output = {} output["MSE"] = "%.2f" % mean_squared_error(target, predict) output["MAE"] = "%.2f" % mean_absolute_error(target, predict) output["RMSE"] = "%.2f" % math.sqrt(mse) output["R2"] = "%.2f" %r2_score(target,predict,multioutput='variance_weighted') output["ProblemName"] = ProblemName output["Problemtype"] = Problemtype output["Params"] = Params output['Status']='Success' start = time.time() predic1 = model.predict(dfp.head(1)) end = time.time() timetaken = (round((end - start) * 1000,2) ,'Seconds') # print(timetaken) start1 = time.time() predic2 = model.predict(dfp.head(10)) end1 = time.time() timetaken1 = (round((end1 - start1) * 1000,2),'Seconds') # print(timetaken1) start2 = time.time() predic3 = model.predict(dfp.head(100)) end2 = time.time() timetaken2 = (round((end2 - start2) * 1000,2) ,'Seconds') # print(timetaken2) output["onerecord"] = timetaken output["tenrecords"] = timetaken1 output["hundrecords"] = timetaken2 print(json.dumps(output)) except Exception as e: output = {} output['Problemtype']='Regression' output['Status']='Fail' output["ProblemName"] = ProblemName output["Msg"] = 'Detected Model : {} \\n Problem Type : Regression \\n Error : {}'.format(ProblemName, str(e).replace('"','//"').replace('\n', '\\n')) print(json.dumps(output)) else: output = {} output['Problemtype']='Unknown' output['Status']='Fail' output['Params'] = '' output["ProblemName"] = ProblemName output["Msg"] = 'Detected Model : {} \\n Error : {}'.format(ProblemName, 'Model not supported') print(json.dumps(output)) return(json.dumps(output)) def baseline_testing(modelFile,csvFile,features,target): features = [x.strip() for x in features.split(',')] return mltesting(modelFile,csvFile,features,target)
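A short, hypothetical invocation of baseline_testing from baseline.py; the file names and column names are placeholders, and the function both prints and returns a JSON string with scores and per-batch prediction timings.

from baseline import baseline_testing        # assumed import path

result_json = baseline_testing(
    modelFile='model.sav',                    # joblib-pickled classifier or regressor
    csvFile='test_data.csv',                  # must contain the model's features plus the target column
    features='feature1,feature2',             # comma-separated; re-derived from the model internally where possible
    target='label')                           # target column name in the CSV
print(result_json)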
item_rating.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import pandas as pd import numpy as np import os import datetime, time, timeit from sklearn.model_selection import KFold from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score from sklearn.metrics import classification_report import pickle import logging class recommendersystem(): def __init__(self,features,svd_params): self.features = features self.svd_input = svd_params self.log = logging.getLogger('eion') print ("recommendersystem starts \n") #To extract dict key,values def extract_params(self,dict): self.dict=dict for k,v in self.dict.items(): return k,v def recommender_model(self,df,outputfile): from sklearn.metrics.pairwise import cosine_similarity from utils.file_ops import save_csv USER_ITEM_MATRIX = 'user_item_matrix' ITEM_SIMILARITY_MATRIX = 'item_similarity_matrix' selectedColumns = self.features.split(',') data = pd.DataFrame() for i in range(0,len(selectedColumns)): data[selectedColumns[i]] = df[selectedColumns[i]] dataset = data self.log.info('-------> Top(5) Rows') self.log.info(data.head(5)) start = time.time() self.log.info('\n----------- Recommender System Training Starts -----------') #--------------- Task 11190:recommender system changes Start ---Usnish------------------# # selectedColumns = ['userId', 'movieId', 'rating'] df_eda = df.groupby(selectedColumns[1]).agg(mean_rating=(selectedColumns[2], 'mean'),number_of_ratings=(selectedColumns[2], 'count')).reset_index() self.log.info('-------> Top 10 most rated Items:') self.log.info(df_eda.sort_values(by='number_of_ratings', ascending=False).head(10)) matrix = data.pivot_table(index=selectedColumns[1], columns=selectedColumns[0], values=selectedColumns[2]) relative_file = os.path.join(outputfile, 'data', USER_ITEM_MATRIX + '.csv') matrix.to_csv(relative_file) item_similarity_cosine = cosine_similarity(matrix.fillna(0)) item_similarity_cosine = pd.DataFrame(item_similarity_cosine,columns=pd.Series([i + 1 for i in range(item_similarity_cosine.shape[0])],name='ItemId'),index=pd.Series([i + 1 for i in range(item_similarity_cosine.shape[0])],name='ItemId')) self.log.info('---------> Item-Item Similarity matrix created:') self.log.info(item_similarity_cosine.head(5)) relative_file = os.path.join(outputfile, 'data', ITEM_SIMILARITY_MATRIX + '.csv') save_csv(item_similarity_cosine,relative_file) # --------------- recommender system changes End ---Usnish------------------# executionTime=time.time() - start self.log.info("------->Execution Time: "+str(executionTime)) self.log.info('----------- Recommender System Training End -----------\n') return "filename",matrix,"NA","",""
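A minimal sketch of how recommendersystem might be driven, assuming a ratings file with user, item and rating columns; the import path, file name and column names are placeholders. Note that recommender_model writes its matrices under <outputfile>/data, so that folder must already exist.

import pandas as pd
from item_rating import recommendersystem     # assumed import path

df = pd.read_csv('ratings.csv')               # e.g. columns: userId, movieId, rating
rec = recommendersystem(features='userId,movieId,rating', svd_params={})
# per the interface above, returns ("filename", user-item matrix, "NA", "", "")
_, user_item_matrix, *_ = rec.recommender_model(df, outputfile='output_dir')
print(user_item_matrix.head())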
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
text_similarity.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import numpy as np import pickle import pandas as pd import sys import time import os from os.path import expanduser import platform from sklearn.preprocessing import binarize import logging import tensorflow as tf from sklearn.model_selection import train_test_split from tensorflow.keras import preprocessing from sklearn.metrics import roc_auc_score from sklearn.metrics import accuracy_score from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.layers import Input, Embedding, LSTM, Lambda import tensorflow.keras.backend as K from tensorflow.keras.models import Model from tensorflow.keras.optimizers import Adam from tensorflow.keras.layers import Concatenate from tensorflow.keras.layers import Input, Dense, Flatten, GlobalMaxPool2D, GlobalAvgPool2D, Concatenate, Multiply, Dropout, Subtract, Add, Conv2D from sklearn.metrics.pairwise import cosine_similarity, cosine_distances import tensorflow.keras.backend as K from tensorflow.keras.models import Model, Sequential from tensorflow.keras import layers, utils, callbacks, optimizers, regularizers ## Keras subclassing based siamese network class siameseNetwork(Model): def __init__(self, activation,inputShape, num_iterations): self.activation=activation self.log = logging.getLogger('eion') super(siameseNetwork, self).__init__() i1 = layers.Input(shape=inputShape) i2 = layers.Input(shape=inputShape) featureExtractor = self.build_feature_extractor(inputShape, num_iterations) f1 = featureExtractor(i1) f2 = featureExtractor(i2) #distance vect distance = layers.Concatenate()([f1, f2]) cosine_loss = tf.keras.losses.CosineSimilarity(axis=1) c_loss=cosine_loss(f1, f2) similarity = tf.keras.layers.Dot(axes=1,normalize=True)([f1,f2]) outputs = layers.Dense(1, activation="sigmoid")(distance) self.model = Model(inputs=[i1, i2], outputs=outputs) ##Build dense sequential layers def build_feature_extractor(self, inputShape, num_iterations): layers_config = [layers.Input(inputShape)] for i, n_units in enumerate(num_iterations): layers_config.append(layers.Dense(n_units)) layers_config.append(layers.Dropout(0.2)) layers_config.append(layers.BatchNormalization()) layers_config.append(layers.Activation(self.activation)) model = Sequential(layers_config, name='feature_extractor') return model def call(self, x): return self.model(x) def euclidean_distance(vectors): (f1, f2) = vectors sumSquared = K.sum(K.square(f1 - f2), axis=1, keepdims=True) return K.sqrt(K.maximum(sumSquared, K.epsilon())) def cosine_similarity(vectors): (f1, f2) = vectors f1 = K.l2_normalize(f1, axis=-1) f2 = K.l2_normalize(f2, axis=-1) return K.mean(f1 * f2, axis=-1, keepdims=True) def cos_dist_output_shape(shapes): shape1, shape2 = shapes return (shape1[0],1) class eion_similarity_siamese: def __init__(self): self.log = logging.getLogger('eion') def siamese_model(self,df,col1,col2,targetColumn,conf,pipe,deployLocation,iterName,iterVersion,testPercentage,predicted_data_file): try: self.log.info('-------> Read Embedded File') home = 
expanduser("~") if platform.system() == 'Windows': modelsPath = os.path.join(home,'AppData','Local','HCLT','AION','PreTrainedModels','TextSimilarity') else: modelsPath = os.path.join(home,'HCLT','AION','PreTrainedModels','TextSimilarity') if os.path.isdir(modelsPath) == False: os.makedirs(modelsPath) embedding_file_path = os.path.join(modelsPath,'glove.6B.100d.txt') if not os.path.exists(embedding_file_path): from pathlib import Path import urllib.request import zipfile location = modelsPath local_file_path = os.path.join(location,"glove.6B.zip") file_test, header_test = urllib.request.urlretrieve('http://nlp.stanford.edu/data/wordvecs/glove.6B.zip', local_file_path) with zipfile.ZipFile(local_file_path, 'r') as zip_ref: zip_ref.extractall(location) os.unlink(os.path.join(location,"glove.6B.zip")) if os.path.isfile(os.path.join(location,"glove.6B.50d.txt")): os.unlink(os.path.join(location,"glove.6B.50d.txt")) if os.path.isfile(os.path.join(location,"glove.6B.300d.txt")): os.unlink(os.path.join(location,"glove.6B.300d.txt")) if os.path.isfile(os.path.join(location,"glove.6B.200d.txt")): os.unlink(os.path.join(location,"glove.6B.200d.txt")) X = df[[col1,col2]] Y = df[targetColumn] testPercentage = testPercentage self.log.info('\n-------------- Test Train Split ----------------') if testPercentage == 0: xtrain=X ytrain=Y xtest=X ytest=Y else: testSize=testPercentage/100 self.log.info('-------> Split Type: Random Split') self.log.info('-------> Train Percentage: '+str(testSize)) X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=testSize) self.log.info('-------> Train Data Shape: '+str(X_train.shape)+' ---------->') self.log.info('-------> Test Data Shape: '+str(X_test.shape)+' ---------->') self.log.info('-------------- Test Train Split End ----------------\n') self.log.info('\n-------------- Train Validate Split ----------------') X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.20, random_state=42) self.log.info('-------> Train Data Shape: '+str(X_train.shape)+' ---------->') self.log.info('-------> Validate Data Shape: '+str(X_val.shape)+' ---------->') self.log.info('-------------- Train Validate Split End----------------\n') self.log.info('Status:- |... 
Train / test split done: '+str(100-testPercentage)+'% train,'+str(testPercentage)+'% test') train_sentence1 = pipe.texts_to_sequences(X_train[col1].values) train_sentence2 = pipe.texts_to_sequences(X_train[col2].values) val_sentence1 = pipe.texts_to_sequences(X_val[col1].values) val_sentence2 = pipe.texts_to_sequences(X_val[col2].values) len_vec = [len(sent_vec) for sent_vec in train_sentence1] max_len = np.max(len_vec) len_vec = [len(sent_vec) for sent_vec in train_sentence2] if (max_len < np.max(len_vec)): max_len = np.max(len_vec) train_sentence1 = pad_sequences(train_sentence1, maxlen=max_len, padding='post') train_sentence2 = pad_sequences(train_sentence2, maxlen=max_len, padding='post') val_sentence1 = pad_sequences(val_sentence1, maxlen=max_len, padding='post') val_sentence2 = pad_sequences(val_sentence2, maxlen=max_len, padding='post') y_train = y_train.values y_val = y_val.values activation = str(conf['activation']) model = siameseNetwork(activation,inputShape=train_sentence1.shape[1], num_iterations=[10]) model.compile( loss="binary_crossentropy", optimizer=optimizers.Adam(learning_rate=0.0001), metrics=["accuracy"]) es = callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=1, restore_best_weights=True) rlp = callbacks.ReduceLROnPlateau( monitor='val_loss', factor=0.1, patience=2, min_lr=1e-10, mode='min', verbose=1 ) x_valid=X_val y_valid=y_val n_epoch = int(conf['num_epochs']) batch_size = int(conf['batch_size']) similarityIndex = conf['similarityIndex'] model.fit([train_sentence1,train_sentence2],y_train.reshape(-1,1), epochs = n_epoch,batch_size=batch_size, validation_data=([val_sentence1, val_sentence2],y_val.reshape(-1,1)),callbacks=[es, rlp]) scores = model.evaluate([val_sentence1, val_sentence2], y_val.reshape(-1,1), verbose=0) self.log.info('-------> Model Score Matrix: Accuracy') self.log.info('-------> Model Score (Validate Data) : '+str(scores[1])) self.log.info('Status:- |... Algorithm applied: SIAMESE') test_sentence1 = pipe.texts_to_sequences(X_test[col1].values) test_sentence2 = pipe.texts_to_sequences(X_test[col2].values) test_sentence1 = pad_sequences(test_sentence1, maxlen=max_len, padding='post') test_sentence2 = pad_sequences(test_sentence2, maxlen=max_len, padding='post') prediction = model.predict([test_sentence1, test_sentence2 ]) n_epoch = conf['num_epochs'] batch_size = conf['batch_size'] activation = conf['activation'] similarityIndex = conf['similarityIndex'] self.log.info('-------> similarityIndex : '+str(similarityIndex)) prediction = np.where(prediction > similarityIndex,1,0) rocauc_sco = roc_auc_score(y_test,prediction) acc_sco = accuracy_score(y_test, prediction) predict_df = pd.DataFrame() predict_df['actual'] = y_test predict_df['predict'] = prediction predict_df.to_csv(predicted_data_file) self.log.info('-------> Model Score Matrix: Accuracy') self.log.info('-------> Model Score (Validate Data) : '+str(scores[1])) self.log.info('Status:- |... 
Algorithm applied: SIAMESE') test_sentence1 = pipe.texts_to_sequences(X_test[col1].values) test_sentence2 = pipe.texts_to_sequences(X_test[col2].values) test_sentence1 = pad_sequences(test_sentence1, maxlen=max_len, padding='post') test_sentence2 = pad_sequences(test_sentence2, maxlen=max_len, padding='post') prediction = model.predict([test_sentence1, test_sentence2 ]) prediction = np.where(prediction > similarityIndex,1,0) rocauc_sco = roc_auc_score(y_test,prediction) acc_sco = accuracy_score(y_test, prediction) predict_df = pd.DataFrame() predict_df['actual'] = y_test predict_df['predict'] = prediction predict_df.to_csv(predicted_data_file) self.log.info("predict_df: \n"+str(predict_df)) sco = acc_sco self.log.info('-------> Test Data Accuracy Score : '+str(acc_sco)) self.log.info('Status:- |... Testing Score: '+str(acc_sco)) self.log.info('-------> Test Data ROC AUC Score : '+str(rocauc_sco)) matrix = '"Accuracy":'+str(acc_sco)+',"ROC AUC":'+str(rocauc_sco) prediction = model.predict([train_sentence1, train_sentence2]) prediction = np.where(prediction > similarityIndex,1,0) train_rocauc_sco = roc_auc_score(y_train,prediction) train_acc_sco = accuracy_score(y_train, prediction) self.log.info('-------> Train Data Accuracy Score : '+str(train_acc_sco)) self.log.info('-------> Train Data ROC AUC Score : '+str(train_rocauc_sco)) trainmatrix = '"Accuracy":'+str(train_acc_sco)+',"ROC AUC":'+str(train_rocauc_sco) model_tried = '{"Model":"SIAMESE","Score":'+str(sco)+'}' saved_model = 'textsimilarity_'+iterName+'_'+iterVersion # filename = os.path.join(deployLocation,'model','textsimilarity_'+iterName+'_'+iterVersion+'.sav') # filename = os.path.join(deployLocation,'model','textsimilarity_'+iterName+'_'+iterVersion+'.h5') ## Because we are using subclassing layer api, please use dir (as below) to store deep learn model instead of .h5 model. filename = os.path.join(deployLocation,'model','textsimilarity_'+iterName+'_'+iterVersion) model.save(filename) # model.save_weights(filename) model_name = 'SIAMESE MODEL' return(model_name,scores[1],matrix,trainmatrix,model_tried,saved_model,filename,max_len,similarityIndex) except Exception as inst: self.log.info("SIAMESE failed " + str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
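A hypothetical driver for eion_similarity_siamese.siamese_model, assuming the pipe argument is a fitted Keras Tokenizer (the method only calls texts_to_sequences on it) and that the target column holds 0/1 duplicate labels; all paths, column names and config values below are placeholders.

import pandas as pd
from tensorflow.keras.preprocessing.text import Tokenizer
from text_similarity import eion_similarity_siamese    # assumed import path

df = pd.read_csv('pairs.csv')                           # columns: question1, question2, is_duplicate (0/1)
pipe = Tokenizer(num_words=20000)
pipe.fit_on_texts(pd.concat([df['question1'], df['question2']]).astype(str))
conf = {'activation': 'relu', 'num_epochs': 5, 'batch_size': 32, 'similarityIndex': 0.5}
sim = eion_similarity_siamese()
# deployLocation is expected to contain a model/ subfolder for the saved network
result = sim.siamese_model(df, 'question1', 'question2', 'is_duplicate', conf, pipe,
                           deployLocation='deploy_dir', iterName='demo', iterVersion='1',
                           testPercentage=20, predicted_data_file='deploy_dir/predicted.csv')
model_name, val_accuracy = result[0], result[1]
print(model_name, val_accuracy)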
__init__.py
null
pipelines.py
import itertools import logging from typing import Optional, Dict, Union from nltk import sent_tokenize import torch from transformers import( AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel, PreTrainedTokenizer, ) logger = logging.getLogger(__name__) class QGPipeline: """Poor man's QG pipeline""" def __init__( self, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, ans_model: PreTrainedModel, ans_tokenizer: PreTrainedTokenizer, qg_format: str, use_cuda: bool ): self.model = model self.tokenizer = tokenizer self.ans_model = ans_model self.ans_tokenizer = ans_tokenizer self.qg_format = qg_format self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu" self.model.to(self.device) if self.ans_model is not self.model: self.ans_model.to(self.device) assert self.model.__class__.__name__ in ["T5ForConditionalGeneration", "BartForConditionalGeneration"] if "T5ForConditionalGeneration" in self.model.__class__.__name__: self.model_type = "t5" else: self.model_type = "bart" def __call__(self, inputs: str): inputs = " ".join(inputs.split()) sents, answers = self._extract_answers(inputs) flat_answers = list(itertools.chain(*answers)) if len(flat_answers) == 0: return [] if self.qg_format == "prepend": qg_examples = self._prepare_inputs_for_qg_from_answers_prepend(inputs, answers) else: qg_examples = self._prepare_inputs_for_qg_from_answers_hl(sents, answers) qg_inputs = [example['source_text'] for example in qg_examples] questions = self._generate_questions(qg_inputs) output = [{'answer': example['answer'], 'question': que} for example, que in zip(qg_examples, questions)] return output def _generate_questions(self, inputs): inputs = self._tokenize(inputs, padding=True, truncation=True) outs = self.model.generate( input_ids=inputs['input_ids'].to(self.device), attention_mask=inputs['attention_mask'].to(self.device), max_length=32, num_beams=4, ) questions = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in outs] return questions def _extract_answers(self, context): sents, inputs = self._prepare_inputs_for_ans_extraction(context) inputs = self._tokenize(inputs, padding=True, truncation=True) outs = self.ans_model.generate( input_ids=inputs['input_ids'].to(self.device), attention_mask=inputs['attention_mask'].to(self.device), max_length=32, ) dec = [self.ans_tokenizer.decode(ids, skip_special_tokens=False) for ids in outs] answers = [item.split('<sep>') for item in dec] answers = [i[:-1] for i in answers] return sents, answers def _tokenize(self, inputs, padding=True, truncation=True, add_special_tokens=True, max_length=512 ): inputs = self.tokenizer.batch_encode_plus( inputs, max_length=max_length, add_special_tokens=add_special_tokens, truncation=truncation, padding="max_length" if padding else False, pad_to_max_length=padding, return_tensors="pt" ) return inputs def _prepare_inputs_for_ans_extraction(self, text): sents = sent_tokenize(text) inputs = [] for i in range(len(sents)): source_text = "extract answers:" for j, sent in enumerate(sents): if i == j: sent = "<hl> %s <hl>" % sent source_text = "%s %s" % (source_text, sent) source_text = source_text.strip() if self.model_type == "t5": source_text = source_text + " </s>" inputs.append(source_text) return sents, inputs def _prepare_inputs_for_qg_from_answers_hl(self, sents, answers): inputs = [] for i, answer in enumerate(answers): if len(answer) == 0: continue for answer_text in answer: sent = sents[i] sents_copy = sents[:] answer_text = answer_text.strip() ans_start_idx = 0 # ans_start_idx = 
sent.index(answer_text) # if answer_text in sent: # ans_start_idx = sent.index(answer_text) # else: # continue sent = f"{sent[:ans_start_idx]} <hl> {answer_text} <hl> {sent[ans_start_idx + len(answer_text): ]}" sents_copy[i] = sent source_text = " ".join(sents_copy) source_text = f"generate question: {source_text}" if self.model_type == "t5": source_text = source_text + " </s>" inputs.append({"answer": answer_text, "source_text": source_text}) return inputs def _prepare_inputs_for_qg_from_answers_prepend(self, context, answers): flat_answers = list(itertools.chain(*answers)) examples = [] for answer in flat_answers: source_text = f"answer: {answer} context: {context}" if self.model_type == "t5": source_text = source_text + " </s>" examples.append({"answer": answer, "source_text": source_text}) return examples class MultiTaskQAQGPipeline(QGPipeline): def __init__(self, **kwargs): super().__init__(**kwargs) def __call__(self, inputs: Union[Dict, str]): if type(inputs) is str: # do qg return super().__call__(inputs) else: # do qa return self._extract_answer(inputs["question"], inputs["context"]) def _prepare_inputs_for_qa(self, question, context): source_text = f"question: {question} context: {context}" if self.model_type == "t5": source_text = source_text + " </s>" return source_text def _extract_answer(self, question, context): source_text = self._prepare_inputs_for_qa(question, context) inputs = self._tokenize([source_text], padding=False) outs = self.model.generate( input_ids=inputs['input_ids'].to(self.device), attention_mask=inputs['attention_mask'].to(self.device), max_length=16, ) answer = self.tokenizer.decode(outs[0], skip_special_tokens=True) return answer class E2EQGPipeline: def __init__( self, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, use_cuda: bool ) : self.model = model self.tokenizer = tokenizer self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu" self.model.to(self.device) assert self.model.__class__.__name__ in ["T5ForConditionalGeneration", "BartForConditionalGeneration"] if "T5ForConditionalGeneration" in self.model.__class__.__name__: self.model_type = "t5" else: self.model_type = "bart" self.default_generate_kwargs = { "max_length": 256, "num_beams": 4, "length_penalty": 1.5, "no_repeat_ngram_size": 3, "early_stopping": True, } def __call__(self, context: str, **generate_kwargs): inputs = self._prepare_inputs_for_e2e_qg(context) # TODO: when overrding default_generate_kwargs all other arguments need to be passsed # find a better way to do this if not generate_kwargs: generate_kwargs = self.default_generate_kwargs input_length = inputs["input_ids"].shape[-1] # max_length = generate_kwargs.get("max_length", 256) # if input_length < max_length: # logger.warning( # "Your max_length is set to {}, but you input_length is only {}. You might consider decreasing max_length manually, e.g. 
summarizer('...', max_length=50)".format( # max_length, input_length # ) # ) outs = self.model.generate( input_ids=inputs['input_ids'].to(self.device), attention_mask=inputs['attention_mask'].to(self.device), **generate_kwargs ) prediction = self.tokenizer.decode(outs[0], skip_special_tokens=True) questions = prediction.split("<sep>") questions = [question.strip() for question in questions[:-1]] return questions def _prepare_inputs_for_e2e_qg(self, context): source_text = f"generate questions: {context}" if self.model_type == "t5": source_text = source_text + " </s>" inputs = self._tokenize([source_text], padding=False) return inputs def _tokenize( self, inputs, padding=True, truncation=True, add_special_tokens=True, max_length=512 ): inputs = self.tokenizer.batch_encode_plus( inputs, max_length=max_length, add_special_tokens=add_special_tokens, truncation=truncation, padding="max_length" if padding else False, pad_to_max_length=padding, return_tensors="pt" ) return inputs SUPPORTED_TASKS = { "question-generation": { "impl": QGPipeline, "default": { "model": "valhalla/t5-small-qg-hl", "ans_model": "valhalla/t5-small-qa-qg-hl", } }, "multitask-qa-qg": { "impl": MultiTaskQAQGPipeline, "default": { "model": "valhalla/t5-small-qa-qg-hl", } }, "e2e-qg": { "impl": E2EQGPipeline, "default": { "model": "valhalla/t5-small-e2e-qg", } } } def pipeline( task: str, model: Optional = None, tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None, qg_format: Optional[str] = "highlight", ans_model: Optional = None, ans_tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None, use_cuda: Optional[bool] = True, **kwargs, ): # Retrieve the task if task not in SUPPORTED_TASKS: raise KeyError("Unknown task {}, available tasks are {}".format(task, list(SUPPORTED_TASKS.keys()))) targeted_task = SUPPORTED_TASKS[task] task_class = targeted_task["impl"] # Use default model/config/tokenizer for the task if no model is provided if model is None: model = targeted_task["default"]["model"] # Try to infer tokenizer from model or config name (if provided as str) if tokenizer is None: if isinstance(model, str): tokenizer = model else: # Impossible to guest what is the right tokenizer here raise Exception( "Impossible to guess which tokenizer to use. " "Please provided a PretrainedTokenizer class or a path/identifier to a pretrained tokenizer." ) # Instantiate tokenizer if needed if isinstance(tokenizer, (str, tuple)): if isinstance(tokenizer, tuple): # For tuple we have (tokenizer name, {kwargs}) tokenizer = AutoTokenizer.from_pretrained(tokenizer[0], **tokenizer[1]) else: tokenizer = AutoTokenizer.from_pretrained(tokenizer) # Instantiate model if needed if isinstance(model, str): model = AutoModelForSeq2SeqLM.from_pretrained(model) if task == "question-generation": if ans_model is None: # load default ans model ans_model = targeted_task["default"]["ans_model"] ans_tokenizer = AutoTokenizer.from_pretrained(ans_model) ans_model = AutoModelForSeq2SeqLM.from_pretrained(ans_model) else: # Try to infer tokenizer from model or config name (if provided as str) if ans_tokenizer is None: if isinstance(ans_model, str): ans_tokenizer = ans_model else: # Impossible to guest what is the right tokenizer here raise Exception( "Impossible to guess which tokenizer to use. " "Please provided a PretrainedTokenizer class or a path/identifier to a pretrained tokenizer." 
) # Instantiate tokenizer if needed if isinstance(ans_tokenizer, (str, tuple)): if isinstance(ans_tokenizer, tuple): # For tuple we have (tokenizer name, {kwargs}) ans_tokenizer = AutoTokenizer.from_pretrained(ans_tokenizer[0], **ans_tokenizer[1]) else: ans_tokenizer = AutoTokenizer.from_pretrained(ans_tokenizer) if isinstance(ans_model, str): ans_model = AutoModelForSeq2SeqLM.from_pretrained(ans_model) if task == "e2e-qg": return task_class(model=model, tokenizer=tokenizer, use_cuda=use_cuda) elif task == "question-generation": return task_class(model=model, tokenizer=tokenizer, ans_model=ans_model, ans_tokenizer=ans_tokenizer, qg_format=qg_format, use_cuda=use_cuda) else: return task_class(model=model, tokenizer=tokenizer, ans_model=model, ans_tokenizer=tokenizer, qg_format=qg_format, use_cuda=use_cuda)
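The pipeline() factory above mirrors the transformers-style interface; an illustrative call, using the default valhalla T5 checkpoints wired into SUPPORTED_TASKS (the NLTK punkt tokenizer must be available for sentence splitting, and the import path is assumed).

import nltk
nltk.download('punkt')                          # sent_tokenize dependency
from pipelines import pipeline                  # assumed import path

nlp = pipeline("question-generation")           # answer extraction + question generation
text = "Python is a programming language. It was created by Guido van Rossum."
print(nlp(text))                                # [{'answer': ..., 'question': ...}, ...]

e2e = pipeline("e2e-qg")                        # single end-to-end model
print(e2e(text))                                # list of generated question strings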
__init__.py
null
__init__.py
null
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
aionNAS.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import logging logging.getLogger('tensorflow').disabled = True import json #from nltk.corpus import stopwords from collections import Counter from numpy import mean from numpy import std from pandas import read_csv from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder from sklearn.compose import ColumnTransformer from learner.machinelearning import machinelearning # from sklearn.dummy import DummyClassifier # create histograms of numeric input variables import sys import os import re import pandas as pd import numpy as np from learner.aion_matrix import aion_matrix import tensorflow as tf tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) import autokeras as ak # load the sonar dataset from sklearn.model_selection import train_test_split # from sklearn.metrics import cohen_kappa_score # from sklearn.metrics import roc_auc_score # from sklearn.metrics import confusion_matrix from sklearn.metrics import roc_curve from math import sqrt from sklearn.metrics import mean_squared_error, explained_variance_score,mean_absolute_error from sklearn import metrics class aionNAS: def __init__(self,nas_class,nas_params,xtrain1,xtest1,ytrain1,ytest1,deployLocation): try: self.dfFeatures=None self.nas_class=nas_class self.nas_params=nas_params self.targetFeature=None self.log = logging.getLogger('eion') self.n_models=int(self.nas_params['n_models']) self.n_epochs=int(self.nas_params['n_epochs']) self.optimizer=self.nas_params['optimizer'] self.metrics=self.nas_params['metrics'] self.tuner=self.nas_params['tuner'] self.seed=int(self.nas_params['seed']) self.xtrain = xtrain1 self.xtest = xtest1 self.ytrain = ytrain1 self.ytest = ytest1 #self.labelMaps = labelMaps self.deployLocation=deployLocation except Exception as e: self.log.info('<!------------- NAS INIT Error ---------------> ') exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def paramCheck(self): try: if not (self.nas_class): self.log.info('<!------------- NAS class input Error ---------------> ') if not (self.nas_params): self.log.info('<!------------- NAS model hyperparameter input Error ---------------> ') if not (self.targetFeature): self.log.info('<!------------- NAS model targetFeature input Error ---------------> ') if (self.n_models < 1): self.n_models=1 if not (self.dfFeatures): self.log.info('<!------------- NAS model features Error ---------------> ') if (self.n_epochs < 1): self.n_models=1 if not (self.optimizer): self.optimizer="adam" if not (self.tuner): self.tuner="greedy" if (self.seed < 1): self.seed=0 if not (self.metrics): self.metrics=None except ValueError: self.log.info('<------------------ NAS config file error. 
--------------->') def recall_m(self,y_true, y_pred): true_positives = tf.keras.metrics.Sum(tf.keras.backend.round(tf.keras.backend.clip(y_true * y_pred, 0, 1))) possible_positives = tf.keras.metrics.Sum(tf.keras.backend.round(tf.keras.backend.clip(y_true, 0, 1))) recall = true_positives / (possible_positives + tf.keras.backend.epsilon()) return recall def precision_m(self,y_true, y_pred): true_positives = tf.keras.metrics.Sum(tf.keras.backend.round(tf.keras.backend.clip(y_true * y_pred, 0, 1))) predicted_positives = tf.keras.metrics.Sum(tf.keras.backend.round(tf.keras.backend.clip(y_pred, 0, 1))) precision = true_positives / (predicted_positives + tf.keras.backend.epsilon()) return precision def f1_score(self,y_true, y_pred): precision = self.precision_m(y_true, y_pred) recall = self.recall_m(y_true, y_pred) return 2*((precision*recall)/(precision+recall+tf.keras.backend.epsilon())) def nasStructdataPreprocess(self): df=self.data self.paramCheck() target=df[self.targetFeature].values counter = Counter(target) for k,v in counter.items(): per = v / len(target) * 100 self.log.info('autokeras struct Class=%d, Count=%d, Percentage=%.3f%%' % (k, v, per)) # select columns with numerical data types num_ix = df.select_dtypes(include=['int64', 'float64']).columns subset = df[num_ix] last_ix = len(df.columns) - 1 y=df[self.targetFeature] X = df.drop(self.targetFeature, axis=1) #Using Pearson Correlation # plt.figure(figsize=(12,10)) # cor = df.corr() # sns.heatmap(cor, annot=True, cmap=plt.cm.Reds) # plt.show() # select categorical features cat_ix = X.select_dtypes(include=['object', 'bool']).columns # one hot encode cat features only ct = ColumnTransformer([('o',OneHotEncoder(),cat_ix)], remainder='passthrough') X = X.reset_index() X=X.replace(to_replace="NULL",value=0) X = X.dropna(how='any',axis=0) X = ct.fit_transform(X) from sklearn.preprocessing import scale X = scale(X) # label encode the target variable to have the classes 0 and 1 y = LabelEncoder().fit_transform(y) # separate into train and test sets X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=self.test_size,random_state=1) return X_train, X_test, y_train, y_test def nasStructClassification(self,scoreParam): try: objClf = aion_matrix() X_train, X_test, y_train, y_test= self.xtrain, self.xtest, self.ytrain, self.ytest modelName="nas_structdata_classifier" self.log.info("Processing structured data block...\n") s_in = ak.StructuredDataInput() #s_in = Flatten()(s_in) s_out = ak.StructuredDataBlock(categorical_encoding=True)(s_in) self.log.info("Data pipe via autokeras Classification Dense layers ...\n") s_out = ak.ClassificationHead()(s_out) self.log.info("applying autokeras automodel to run different neural models...\n") try: tuner = str(self.tuner).lower() except UnicodeEncodeError: tuner = (self.tuner.encode('utf8')).lower() nasclf = ak.AutoModel( inputs=s_in, outputs=s_out, overwrite=True, tuner=tuner, max_trials=self.n_models, seed=self.seed) # compile the model #nasclf.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc',self.f1_score,self.precision_m, self.recall_m]) nasclf.fit(X_train, y_train, epochs=self.n_epochs) best_model = nasclf.export_model() mpredict=best_model.predict(X_test) mtpredict=best_model.predict(X_train) #loss, accuracy, f1_score, precision, recall = nasclf.evaluate(X_test, y_test, verbose=0) #from sklearn.metrics import classification_report #Classification report y_pred_bool = np.argmax(mpredict, axis=1) y_train_pred_bool = np.argmax(mtpredict, axis=1) score = 
objClf.get_score(scoreParam,y_test, y_pred_bool) #best_model = nasclf.export_model() best_model_summary=best_model.summary() filename = os.path.join(self.deployLocation,'log','summary.txt') with open(filename,'w') as f: best_model.summary(print_fn=lambda x: f.write(x + '\n')) f.close() #self.log.info("==========") #self.log.info(best_model_summary) self.log.info("NAS struct data classification, best model summary: \n"+str(best_model.summary(print_fn=self.log.info))) #self.log.info("==========") #Save and load model # # #try: # try: # best_model.save("model_class_autokeras", save_format="tf") # except Exception: # best_model.save("model_class_autokeras.h5") # loaded_model = load_model("model_class_autokeras", custom_objects=ak.CUSTOM_OBJECTS) # loadedmodel_predict=loaded_model.predict(X_test) loss,accuracy_m=nasclf.evaluate(X_test, y_test) #mpredict_classes = mpredict.argmax(axis=-1) #accuracy = accuracy_score(y_test.astype(int), mpredict.astype(int)) # precision tp / (tp + fp) #precision = precision_score(y_test.astype(int), mpredict.astype(int),average='macro') # recall: tp / (tp + fn) #recall = recall_score(y_test.astype(int), mpredict.astype(int),average='macro') #f1score=f1_score(y_test.astype(int), mpredict.astype(int) , average="macro") self.log.info("Autokeras struct data classification metrics: \n") except Exception as inst: self.log.info("Error: NAS failed "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) print(inst) return modelName,nasclf,score def nasStructRegressor(self,scoreParam): objClf = aion_matrix() modelName="nas_struct_regressor" #self.paramCheck() X_train, X_test, y_train, y_test= self.xtrain, self.xtest, self.ytrain, self.ytest # Autokeras alg s_in = ak.StructuredDataInput() #tf.keras.layers.GlobalMaxPooling2D()(s_in) s_out = ak.StructuredDataBlock(categorical_encoding=True)(s_in) self.log.info("Data pipe via autokeras Regression Dense layers ...\n") s_out = ak.RegressionHead(loss='mse', metrics=['mae'])(s_out) self.log.info("applying autokeras automodel to evaluate different neural models...\n") try: tuner = str(self.tuner).lower() except UnicodeEncodeError: tuner = (self.tuner.encode('utf8')).lower() nas_reg = ak.AutoModel( inputs=s_in, outputs=s_out, overwrite=True, tuner=tuner, max_trials=self.n_models) nas_reg.fit(X_train, y_train, epochs=self.n_epochs) best_model = nas_reg.export_model() self.log.info("NAS struct data regression best model summary: \n") best_model_summary=best_model.summary(print_fn=self.log.info) self.log.info(best_model_summary) predictm=best_model.predict(X_test) mtpredict=best_model.predict(X_train) score = objClf.get_score(scoreParam,y_test, predictm) self.log.info("Autokeras struct data regression metrics: \n") return modelName,nas_reg,score def nasMain(self,scoreParam): modelName = "" nasclf=None nas_reg=None #text_reg_model=None mse_value=0 reg_rmse=0 mape_reg=0 huber_loss_reg=0 accuracy=0 precision=0 recall=0 #Dummy values to return main for classification problems dummy_score_1=int(0) #dummy_score_2=int(0) try: if ((self.nas_class.lower() == "classification")): modelName,nasclf,score=self.nasStructClassification(scoreParam) self.log.info('NAS Struct Classification score: '+str(score)) best_model_nas = nasclf.export_model() scoredetails = '{"Model":"NAS","Score":'+str(round(score,2))+'}' return best_model_nas,self.nas_params,round(score,2),'NAS',-1,-1,-1 elif (self.nas_class.lower() == "regression"): 
modelName,nas_reg,score =self.nasStructRegressor(scoreParam) self.log.info('NAS Struct Regression score: '+str(score)) best_model_nas = nas_reg.export_model() ''' filename = os.path.join(self.deployLocation,'model','autoKerasModel') best_model_nas = nas_reg.export_model() try: best_model_nas.save(filename, save_format="tf") modelName = 'autoKerasModel' except Exception: filename = os.path.join(self.deployLocation,'model','autoKerasModel.h5') best_model_nas.save(filename) modelName = 'autoKerasModel.h5' ''' scoredetails = '{"Model":"NAS","Score":'+str(round(score,2))+'}' ''' error_matrix = '"MSE":"'+str(round(mse_value,2))+'","RMSE":"'+str(round(reg_rmse,2))+'","MAPE":"'+str(round(mape_reg,2))+'","MSLE":"'+str(round(msle_reg,2))+'"' ''' return best_model_nas,self.nas_params,score,'NAS' else: pass except Exception as inst: print(inst) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) output = {"status":"FAIL","message":str(inst).strip('"')} output = json.dumps(output)
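Note on the metric helpers defined at the top of this block: recall_m, precision_m and f1_score call tf.keras.metrics.Sum(...) directly on a tensor, but Sum is a stateful metric class, not a reduction. The backend-sum form below mirrors the K.sum(K.round(K.clip(...))) pattern that model_deploy.py generates later in this repository; a minimal sketch, assuming TensorFlow 2.x:

import tensorflow as tf
K = tf.keras.backend

def recall_m(y_true, y_pred):
    # true positives / actual positives, with epsilon guarding against division by zero
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())

def precision_m(y_true, y_pred):
    # true positives / predicted positives
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())

def f1_m(y_true, y_pred):
    # harmonic mean of precision and recall
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))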
pushrecords.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import socket import os import rsa from os.path import expanduser from pathlib import Path import requests import platform from appbe.dataPath import DATA_DIR import socket import getmac import subprocess import sys import json from datetime import datetime import binascii computername = socket.getfqdn() global_key = ''' -----BEGIN RSA PUBLIC KEY----- MIIBCgKCAQEAzJcxqRiUpp7CzViyqNlYaeyceDh5y6Ib4SoxoyNkN3+k0q+cr1lb k0KdWTtHIVqH1wsLYofYjpB7X2RN0KYTv8VfwmfQNrpFEbiRz4gcAeuxGCPgGaue N1ttujQMWHWCcY+UH5Voh8YUfkW8P+T3zxvr1d30D+kVBJC59y/31JvTzr3Bw/T+ NYv6xiienYiEYtm9d5ATioEwZOXaQBrtVvRmqcod5A1h4kn1ZauLX2Ph8H4TAuit NLtw6xUCJNumphP7xdU+ca6P6a6eaLprgKhvky+nz16u9/AC2AazRQHKWf8orS6b fw16JDCRs0zU4mTQLCjkUUt0edOaRhUtcQIDAQAB -----END RSA PUBLIC KEY----- ''' quarter_key = ''' -----BEGIN RSA PUBLIC KEY----- MIIBCgKCAQEAmKzOJxVEV9ulA+cjfxguAduLMD47OWjLcEAEmEuK8vR4O5f6e2h1 08NniGC+nkwqmM00U7JTVBkqnt9S/JgE3pAH2xwfWda2OvXNWisWmOQdqB0+XRHh NXsIG3yRk/sMlDpe7MJIyM5ADSu01PLn9FZTfmMq7lEp32tAf71cuUE/dwuWSvEQ WK2hn1L4D97O43XCd7FHtMSHfgtjdcCFgX9IRgWLKC8Bm3q5qcqF4v3cHuYTj3V9 njxPtRqPg6HJFiJrm9AX5bUEHAvbTcw4wAmsNTRQHPvVB+Lc+yGh5x8crhKjNB01 gdB5I3a4mPO7dKvadR6Mr28trr0Ff5t2HQIDAQAB -----END RSA PUBLIC KEY----- ''' halfYear_key=''' -----BEGIN RSA PUBLIC KEY----- MIIBCgKCAQEAgrGNwl8CNYQmVxi8/GEgPjfL5aEmyPkDyaJb9h4hZDSZCeeKd7Rv wwhuRTdBBfOp0bQ7QS7NYMg38Xlc3x85I9RnxdQdDKn2nRuvG0hG3wMBFy/DCSXF tXbDjJkLijAhqcBNu8m+a2Gtn14ShC7TbcfY4iVXho3WFUrn0xq6S5ducqWCsLJh R+TNImCaMICqfoAzEDGC3ojO5Hi3vJmmyK5CVp6bt4wLRATQjcp1ujGW4Uv4kEgp 7TR077c226v1KOdKdyZPHJzT1MKwZrG2Gdluk3/Y1apbwyGzYqFdTCOAB+mE73Dn wFXURgDJQmaU2oxxaA13WRcELpnirm+aIwIDAQAB -----END RSA PUBLIC KEY----- ''' oneYear_key=''' -----BEGIN RSA PUBLIC KEY----- MIIBCgKCAQEA3GLqn+vkKn3fTNH3Bbb3Lq60pCoe+mn0KPz74Bp7p5OkZAUe14pP Tcf/UqdPwiENhSCseWtfZmfKDK8qYRHJ5xW02+AhHPPdiacS45X504/lGG3q/4SG ZgaFhMDvX+IH/ZH+qqbU3dRQhXJCCrAVAa7MonzM6yPiVeS2SdpMkNg1VDR1oTLB Pn+qSV6CnkK1cYtWCRQ23GH2Ru7fc09r7m8hVcifKJze84orpHC5FX0WScQuR8h/ fs1IbGkxTOxP8vplUj/cd4JjUxgd+w+8R4kcoPhdGZF5UGeZA8xMERzQLvh+4Ui0 KIvz5/iyKB/ozaeSG0OMwDAk3WDEnb1WqQIDAQAB -----END RSA PUBLIC KEY----- ''' full_key=''' -----BEGIN RSA PUBLIC KEY----- MIIBCgKCAQEAhqfNMuYYLdVrePhkO9rU/qT6FgolzI0YyzIJ2OeJE+++JioYm6nn ohQU32iiE0DZlCCLrHJXOOIAz2Op80goX0lxtngyxVUPsiB5CI77sAC7x6K3anJ0 elpnQCC0+xV2ZL5eIMNQHLe+X6wJl/HGWqkUlxKpWr4/kBEB4EisW60OePfhntIN 4OUJ7iEq+sDdOM5WazJIXeNV1cig4i6057GE3k5ITcQUmw17DZu2+dqkIscckaG+ t5SF7Qnvt4IY8IeQp2htx3yD+CJCV0u2uKwoSFMGJn3OWdaixC3+eojyMXmfAWtQ Ee9NLNNaTCMIvQ8BeItJLQs2Htw3bZNMvwIDAQAB -----END RSA PUBLIC KEY----- ''' def validate_key_Pair(privatepath,publickey): with open(privatepath, 'rb') as privatefile: keydata = privatefile.read() privatefile.close() try: privkey = rsa.PrivateKey.load_pkcs1(keydata,'PEM') data = 'Validate Global License' signature = rsa.sign(data.encode('utf-8'), privkey, 'SHA-1') pubkey = rsa.PublicKey.load_pkcs1(publickey) except: return False try: rsa.verify(data.encode('utf-8'), signature, pubkey) return True except Exception as e: return False def 
updateDRecord(licensepath): domain_license_path = os.path.join(DATA_DIR,'License','license_domain.lic') if(os.path.isfile(licensepath)): with open(licensepath, 'rb') as f: licensekey = f.read() f.close() with open(domain_license_path, 'wb') as f: f.write(licensekey) f.close() if(validate_key_Pair(domain_license_path,global_key)): return True,'Valid Domain License' else: return False,'Invalid Domain License' else: return False,'File Not Exists' def generateLicenseKey(userKey): record = {'UserKey':userKey} record = json.dumps(record) status = 'Error' url = 'https://qw7e33htlk.execute-api.ap-south-1.amazonaws.com/default/aion_license' try: response = requests.post(url, data=record,headers={"x-api-key":"3cQKRkKA4S57pYrkFp1Dd9jRXt4xnFoB9iqhAQRM","Content-Type":"application/json",}) if response.status_code == 200: outputStr=response.content outputStr = outputStr.decode('utf-8','ignore') outputStr = outputStr.strip() license_dict = json.loads(str(outputStr)) if license_dict['status'] == 'success': status = 'Success' licenseKey = license_dict['msg'] else: status = 'Error' licenseKey = '' else: status = 'Error' licenseKey = '' except Exception as inst: print(inst) status = 'Error' licenseKey = '' msg = {'status':status,'key':userKey,'licenseKey':licenseKey,'link':''} return msg def updateRecord(licensepath): currentDirectory = os.path.dirname(os.path.abspath(__file__)) license_path = os.path.join(currentDirectory,'..','lic','license.lic') if(os.path.isfile(licensepath)): with open(licensepath, 'rb') as f: licensekey = f.read() f.close() with open(license_path, 'wb') as f: f.write(licensekey) f.close() status,msg = check_domain_license() if status: status,msg = getdaysfromstartdate() if status: status,msg = check_days_license(int(msg)) return status,msg else: return False,'File Not Exists' def check_domain_license(): if 'CORP.HCL.IN' in computername: return True,'HCL Domain' else: return True,'HCL Domain' def diff_month(d1, d2): return (d1.year - d2.year) * 12 + d1.month - d2.month def getdaysfromstartdate(): currentDirectory = os.path.dirname(os.path.abspath(__file__)) startdatePath = os.path.join(currentDirectory,'..','lic','startdate.txt') if(os.path.isfile(startdatePath)): with open(startdatePath, "rb") as fl: encrypted_message = fl.read() fl.close() privkey = '''-----BEGIN RSA PRIVATE KEY----- MIIEqwIBAAKCAQEAm75ZwaepuxGJjU1Slk1+IUO2E49Hy8i9dym5FUaBRyTRH6R+ GTF1kcpd+1QinIZDMIdsmAc95Y8pTufxY30QxCkOhVASitSQWHS/IiWQHmsTJwdr 38lqZnQQloOt/iPlhcavbxu/yKFzwBmp+nM+ErDTnCBh6EGCGrw1xWF30T2IBpmp WwMEoqZsFV69RzwQAw39KG1KCxi5uscrB62YPgUdlT2b4Yaa90egQhGLLVdnKvhP ORiGT9omCH90Dkm1oMMQ0Y2JBLezgXa/bunSqtTBxEwzlwUAX2JJcanFYrzKy2OL xzwNRlWUXilZ4R/1RHAgUdNyKbYxZqc24MApoQIDAQABAoIBAQCHZ/i7gNz10qqH 2qkqGlfF7gvYd6MRTwdDGlhbYgA17ZGP9EDaAIFabtpFEAJDmgvCnotQpkMvWcet XcUmHW89TQDd8R8d6u9QqLggpQ3nFGsDbNViLMjAKLrfUb8tjOIZ7ANNE5ArjAuK AgYhxJ48O9bPD+xvtLwip95PHxMMz1CF0vxrpCinvPdeC3HzcnLNZWN3ustbph/4 Tx8mrKDpAVIHVYVbY4CMtm7NbIBYdyR9Lokc4zBg/OTuLo+0QRVJ3GHAN6cGxTwY vLwN9iBBHyn9WBp5NIOSoCdob7+ce8y+X8yHmVhwRCfcrYphzfFNfP7SPNzV1dLs dFybn/h9AoGJALCOC7ss+PBXy5WrWVNRPzFO7KrJDl5q7s/gMk0PkB4i4XOKHDTl MhHZXhxp84HwpphwNxPHvpFe3pVZwwoe8LH1neoodlLOF0Kuk3jENh6cMhKFvcZ+ gxaBxGSCOXF/U307mh0i4AafClhVjxtLgBW5iJSVA9Brc7ZqVwxlUP7aYGzReIE1 uEMCeQDh0vq8NteUlkM/wpNzrHHqgtEzePbTYa+QcTm4xhARHR/cO+E0/mZIfltw 3NVWCIalMia+aKnvRHqHy/cQfEo2Uv/h8oARWnbrvicMRTwYL0w2GrP0f+aG0RqQ msLMzS3kp6szhM7C99reFxdlxJoWBKkp94psOksCgYkApB01zGRudkK17EcdvjPc sMHzfoFryNpPaI23VChuR4UW2mZ797NAypSqRXE7OALxaOuOVuWqP8jW0C9i/Cps 
hI+SnZHFAw2tU3+hd3Wz9NouNUd6c2MwCSDQ5LikGttHSTa49/JuGdmGLTxCzRVu V0NiMPMfW4I2Sk8o4U3gbzWgwiYohLrhrwJ5ANun/7IB2lIykvk7B3g1nZzRYDIk EFpuI3ppWA8NwOUUoj/zksycQ9tx5Pn0JCMKKgYXsS322ozc3B6o3AoSC5GpzDH4 UnAOwavvC0ZZNeoEX6ok8TP7EL3EOYW8s4zIa0KFgPac0Q0+T4tFhMG9qW+PWwhy Oxeo3wKBiCQ8LEgmHnXZv3UZvwcikj6oCrPy8fnhp5RZl2DPPlaqf3vokE6W5oEo LIKcWKvth3EU7HRKwYgaznj/Mw55aETx31R0FiXMG266B4V7QWPF/KuaR0GBsYfu +edGXQCnLgooKlMtQLdL5mcLXHc9x/0Z0iYEejJtbjcGR87WylSNaCH3hH703iQ= -----END RSA PRIVATE KEY----- ''' privkey = rsa.PrivateKey.load_pkcs1(privkey,'PEM') decrypted_message = rsa.decrypt(encrypted_message, privkey) decrypted_message = decrypted_message.decode() import datetime start_time = datetime.datetime.strptime(decrypted_message, '%Y-%m-%d') current_date = datetime.datetime.today().strftime('%Y-%m-%d') current_date = datetime.datetime.strptime(current_date, '%Y-%m-%d') Months = diff_month(current_date,start_time) return True,Months else: return False,'Start Date Not Exists' def check_days_license(months): currentDirectory = os.path.dirname(os.path.abspath(__file__)) license_path = os.path.join(currentDirectory,'..','lic','license.lic') if(os.path.isfile(license_path)): if(validate_key_Pair(license_path,full_key)): return True,'Valid License' elif(validate_key_Pair(license_path,oneYear_key)): if months <= 12: return True,'Valid License' else: return False,'License for AI.ON has expired. Please contact ERS Research for renewal.' elif(validate_key_Pair(license_path,halfYear_key)): if months <= 6: return True,'Valid License' else: return False,'License for AI.ON has expired. Please contact ERS Research for renewal.' elif(validate_key_Pair(license_path,quarter_key)): if months <= 3: return True,'Valid License' else: return False,'License for AI.ON has expired. Please contact ERS Research for renewal.' else: return False,'Invalid License' else: return False,'License Not exists.Please contact ERS Research for renewal.' 
def checklicense(): import binascii license_path = os.path.join(DATA_DIR,'License','license.lic') if(os.path.isfile(license_path)): try: with open(license_path, 'r') as privatefile: license_key = privatefile.read() privatefile.close() encrypted_message = binascii.unhexlify(license_key.encode()) privkey = '''-----BEGIN RSA PRIVATE KEY----- MIIEqQIBAAKCAQEAhqfNMuYYLdVrePhkO9rU/qT6FgolzI0YyzIJ2OeJE+++JioY m6nnohQU32iiE0DZlCCLrHJXOOIAz2Op80goX0lxtngyxVUPsiB5CI77sAC7x6K3 anJ0elpnQCC0+xV2ZL5eIMNQHLe+X6wJl/HGWqkUlxKpWr4/kBEB4EisW60OePfh ntIN4OUJ7iEq+sDdOM5WazJIXeNV1cig4i6057GE3k5ITcQUmw17DZu2+dqkIscc kaG+t5SF7Qnvt4IY8IeQp2htx3yD+CJCV0u2uKwoSFMGJn3OWdaixC3+eojyMXmf AWtQEe9NLNNaTCMIvQ8BeItJLQs2Htw3bZNMvwIDAQABAoIBAGGmuRnrYaeDeWAO CmqZxRMyQybOjyDrRgq9rAR/zJoHp8b3ikcBDTkuBQELWVZLFj7k50XU2cono9zC cxI5xwVrNqrUOkV+7VYJVJzPTFkT/xnEt+zbOfstKmmIDpdzthtTLuHlomhhHA83 rPFi5a0Dpynz35suEnm6ONxx4ICONa3xkQ51ALm8EEsdJ+qRQhi2HLTF/OVZMxSa A2DlFd4ChOEbYaN63xVCDxPXe9BfeHd/Rnim9x4xL9i2RL+mhARUy/ZP6LMHIPk7 NxTrGr4TuE/ETg8FZ3cywSnwsMlcplXo8Ar+5ths2XKxbmH1TI/vuQV1r7r0IeqV F4W/xOkCgYkAiDQy7/WyJWuT+rQ+gOjSUumXgWE3HO+vJAsy05cTZFSs+nUE4ctn FnvbBIRuClSr3zhcTtjaEaVnZ2OmGfOoAq0cvaXSlxqEs2456WQBf9oPHnvJEV07 AIqzo2EuDvGUh/bkFN3+djRRL9usNNplYA8jU3OQHGdeaS15ZikT+ZkQLXoHE0Oh vQJ5AP0W9Qouvc9jXRhjNNOWmgt+JiHw/oQts/LUWJ2T4UJ7wKAqGwsmgf0NbF2p aZ6AbMc7dHzCb52iLJRxlmlkJYzg449t0MgQVxTKQ5viIAdjkRBCIY2++GcYXb6k 6tUnF0Vm2kpffYUb5Lx5JoUE6IhMP0mEv3jKKwKBiCmvoC9lCUL+q+m9JKwbldOe fqowcMfAa+AiNUohIORCLjbxfa8Fq+VrvtqhFXS/+WJ2Q3o2UHe6Ie24x+uFcVRw Wy2IBO4ORbMM91iBLRxORvZTeHSCDj7aNKS6Z3hXY9hBLglc8DaJSJfXKdt7RC+k MnGmGuM2l+Sk8FTeGaj4ucTRZjz1JBkCeQDhNSV1GyShv4xeoCCoy1FmOqmZ+EWy vqxqv1PfXHDM5SwCGZWY9XokAGbWbWLjvOmO27QLNEV34pCCwxSR0aCsXI2B2rk2 3Xtvr5A7zRqtGIdEDWSoKjAGJSN9+mhQpglKI3zJQ3GBGdIPeEqzgSud5SNHu01a IaMCgYgyoxtqdWi90iE75/x+uIVGJRdHtWoL2dr8Ixu1bOMjKCR8gjneSRTqI1tA lbRH5K/jg6iccB/pQmBcIPIubF10Nv/ZQV760WK/h6ue2hOCaBLWT8EQEEfBfnp+ 9rfBfNQIQIkBFTfGIHXUUPb9sJgDP1boUxcqxr9bpKUrs1EMkUd+PrvpHIj2 -----END RSA PRIVATE KEY----- ''' privkey = rsa.PrivateKey.load_pkcs1(privkey,'PEM') decrypted_message = rsa.decrypt(encrypted_message, privkey) msg = decrypted_message.decode().split('####') product = msg[0] computernameLicense = msg[1] computername = socket.getfqdn() licenseValid = False if product.lower() == 'aion': if computernameLicense == computername: uuidlicense = msg[3] uuid = guid() if uuidlicense == uuid: current_date = datetime.now() license_expiry_date = msg[5] license_expiry_date = datetime.strptime(license_expiry_date,'%Y-%m-%d %H:%M:%S') if current_date > license_expiry_date: return False,'License Expire' else: return True,'' return False,'License Error' except Exception as e: print(e) return False,'License Error' else: return False,'Generate License' def generate_record_key(product,version): computername = socket.getfqdn() macaddress = getmac.get_mac_address() license_date = datetime.today().strftime('%Y-%m-%d %H:%M:%S') try: user = os.getlogin() except: user = 'NA' uuid = guid() msg = product+'###'+version+'###'+computername+'###'+macaddress+'###'+user+'###'+sys.platform+'###'+uuid+'###'+license_date pkeydata='''-----BEGIN RSA PUBLIC KEY----- MIIBCgKCAQEAm75ZwaepuxGJjU1Slk1+IUO2E49Hy8i9dym5FUaBRyTRH6R+GTF1 kcpd+1QinIZDMIdsmAc95Y8pTufxY30QxCkOhVASitSQWHS/IiWQHmsTJwdr38lq ZnQQloOt/iPlhcavbxu/yKFzwBmp+nM+ErDTnCBh6EGCGrw1xWF30T2IBpmpWwME oqZsFV69RzwQAw39KG1KCxi5uscrB62YPgUdlT2b4Yaa90egQhGLLVdnKvhPORiG T9omCH90Dkm1oMMQ0Y2JBLezgXa/bunSqtTBxEwzlwUAX2JJcanFYrzKy2OLxzwN RlWUXilZ4R/1RHAgUdNyKbYxZqc24MApoQIDAQAB -----END RSA PUBLIC KEY----- ''' pubkey = 
rsa.PublicKey.load_pkcs1(pkeydata) encrypted_message = rsa.encrypt(msg.encode(), pubkey) encrypted_message = binascii.hexlify(encrypted_message).decode() return(encrypted_message) def run(cmd): try: return subprocess.run(cmd, shell=True, capture_output=True, check=True, encoding="utf-8").stdout.strip() except Exception as e: print(e) return None def guid(): if sys.platform == 'darwin': return run( "ioreg -d2 -c IOPlatformExpertDevice | awk -F\\\" '/IOPlatformUUID/{print $(NF-1)}'", ) if sys.platform == 'win32' or sys.platform == 'cygwin' or sys.platform == 'msys': return run('wmic csproduct get uuid').split('\n')[2].strip() if sys.platform.startswith('linux'): return run('cat /var/lib/dbus/machine-id') or \ run('cat /etc/machine-id') if sys.platform.startswith('openbsd') or sys.platform.startswith('freebsd'): return run('cat /etc/hostid') or \ run('kenv -q smbios.system.uuid') def updateLicense(licensekey): license_folder = os.path.join(DATA_DIR,'License') license_folder = Path(license_folder) license_folder.mkdir(parents=True, exist_ok=True) license_file = license_folder/'license.lic' with open(license_file, "w") as fl: fl.write(licensekey) fl.close() def enterRecord(version): validLicense,msg = checklicense() if not validLicense: key = generate_record_key('AION',version) msg = {'status':msg,'key':key,'licenseKey':'','link':''} return validLicense,msg
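validate_key_Pair above proves a license file is genuine by signing a fixed string with the private key read from the file and verifying the signature against one of the embedded public keys. A minimal sketch of that sign/verify round trip with the rsa package; the throwaway key pair here is generated on the fly purely for illustration and is not one of the AION keys:

import rsa

# Hypothetical key pair for demonstration only; the product ships fixed public keys.
pubkey, privkey = rsa.newkeys(2048)

data = 'Validate Global License'
signature = rsa.sign(data.encode('utf-8'), privkey, 'SHA-1')

try:
    rsa.verify(data.encode('utf-8'), signature, pubkey)
    print('Valid License')
except rsa.VerificationError:
    print('Invalid License')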
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
edge_convert.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import platform import json import shutil import logging import sys from AionConfigManager import AionConfigManager from sklearn.externals import joblib class edgeformats: def __init__(self,deploy_path): self.deploy_path = deploy_path self.edge_deploy_path = os.path.join(deploy_path,"edge") os.mkdir(self.edge_deploy_path) def converttoedgedeployment(self,saved_model,edge_format,xtrain,model_type,iterName,iterVersion,features,profiled_data_file): if edge_format == 'onnx': from skl2onnx import convert_sklearn from skl2onnx.common.data_types import FloatTensorType xtrain = xtrain[features] initial_type = [('float_input', FloatTensorType([None, xtrain.shape[1]]))] filename = os.path.join(self.deploy_path,saved_model) loaded_model = joblib.load(filename) onx = convert_sklearn(loaded_model, initial_types=initial_type) onnx_filename = os.path.join(self.edge_deploy_path, model_type + '_' + iterName + '_' + iterVersion + '.onnx') with open(onnx_filename, "wb") as f: f.write(onx.SerializeToString()) self.createedgeruntimeFile(onnx_filename,profiled_data_file,features) def createedgeruntimeFile(self,onnx_filename,datafilepath,features): runtimefilecontent = '' runtimefilecontent += 'import pandas' runtimefilecontent += '\n' runtimefilecontent += 'import numpy' runtimefilecontent += '\n' runtimefilecontent += 'import sys' runtimefilecontent += '\n' runtimefilecontent += 'import onnxruntime as rt' runtimefilecontent += '\n' runtimefilecontent += 'def onnx_runtime_validation():' runtimefilecontent += '\n' runtimefilecontent += ' modelfile = r"'+str(onnx_filename)+'"' runtimefilecontent += '\n' runtimefilecontent += ' datafile = r"'+str(datafilepath)+'"' runtimefilecontent += '\n' runtimefilecontent += ' dataframe = pandas.read_csv(datafile)' runtimefilecontent += '\n' runtimefilecontent += ' dataframe = dataframe['+str(features)+']' runtimefilecontent += '\n' runtimefilecontent += ' df = dataframe.head(8)' runtimefilecontent += '\n' runtimefilecontent += ' dataset = df.values' runtimefilecontent += '\n' runtimefilecontent += ' sess = rt.InferenceSession(modelfile)' runtimefilecontent += '\n' runtimefilecontent += ' input_name = sess.get_inputs()[0].name' runtimefilecontent += '\n' runtimefilecontent += ' label_name = sess.get_outputs()[0].name' runtimefilecontent += '\n' runtimefilecontent += ' inputsize=sess.get_inputs()[0].shape' runtimefilecontent += '\n' runtimefilecontent += ' XYZ = dataset[:,0:inputsize[1]].astype(float)' runtimefilecontent += '\n' runtimefilecontent += ' pred_onx = sess.run([label_name], {input_name: XYZ.astype(numpy.float32)[0:8]})[0]' runtimefilecontent += '\n' runtimefilecontent += ' df[\'predictions\'] = pred_onx' runtimefilecontent += '\n' runtimefilecontent += ' result = df.to_json(orient="records")' runtimefilecontent += '\n' runtimefilecontent += ' return(result)' runtimefilecontent += '\n' runtimefilecontent += 'if __name__ == "__main__":' runtimefilecontent += '\n' runtimefilecontent += ' output = onnx_runtime_validation()' 
runtimefilecontent += '\n' runtimefilecontent += ' print("predictions:",output)' filename = os.path.join(self.edge_deploy_path,'onnxvalidation.py') f = open(filename, "w", encoding="utf-8") f.write(str(runtimefilecontent)) f.close()
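Two notes on edge_convert.py: `from sklearn.externals import joblib` was removed in recent scikit-learn releases, so on newer environments the standalone joblib package is the usual drop-in; and the generated onnxvalidation.py reduces to a plain onnxruntime session over the profiled CSV. A condensed sketch of that generated flow, with placeholder file names standing in for the paths the generator hard-codes:

import numpy
import pandas
import onnxruntime as rt

# Placeholder paths; createedgeruntimeFile() embeds the real model and data locations.
modelfile = "classification_usecase_1.onnx"
datafile = "profiled_data.csv"

df = pandas.read_csv(datafile).head(8)          # first eight profiled rows
sess = rt.InferenceSession(modelfile)
input_name = sess.get_inputs()[0].name
label_name = sess.get_outputs()[0].name
inputsize = sess.get_inputs()[0].shape          # (None, n_features)

features = df.values[:, 0:inputsize[1]].astype(numpy.float32)
pred_onx = sess.run([label_name], {input_name: features})[0]
df['predictions'] = pred_onx
print("predictions:", df.to_json(orient="records"))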
common.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import json from pathlib import Path from AION.prediction_package.imports import importModule from AION.prediction_package import utility from AION.prediction_package.utility import TAB_CHAR from importlib.metadata import version """ This file provide the functionality which is common for most of the problem types deployment. """ def main_code(): return """ class predict(): def __init__(self): self.profiler = inputprofiler() self.selector = selector() self.trainer = trainer() self.formatter = output_format() def run(self, data): try: df = self._parse_data(data) raw_df = df.copy() df = self.profiler.run(df) df = self.selector.run(df) df = self.trainer.run(df) output = self.formatter.run(raw_df, df) print("predictions:",output) return (output) except Exception as e: output = {"status":"FAIL","message":str(e).strip('"')} print("predictions:",json.dumps(output)) return (json.dumps(output)) def _parse_data(self, data): file_path = Path(data) if file_path.suffix == ".tsv": df = pd.read_csv(data,encoding='utf-8',sep='\\t',skipinitialspace = True,na_values=['-','?']) elif file_path.suffix in [".csv", ".dat"]: df=pd.read_csv(data,encoding='utf-8',skipinitialspace = True,na_values=['-','?']) elif file_path.suffix in [".gz"] and file_path.stem.endswith('.csv'): df=pd.read_csv(data,encoding='utf-8',skipinitialspace = True,na_values=['-','?']) elif file_path.suffix == ".json": with open(data,'r',encoding='utf-8') as f: jsonData = json.load(f) df = pd.json_normalize(jsonData) else: jsonData = json.loads(data) df = pd.json_normalize(jsonData) return df import sys if __name__ == "__main__": output = predict().run(sys.argv[1]) """ def profiler_code(params, indent=0): """ This will create the profiler file based on the config file. separated file is created as profiler is required for input drift also. 
""" imported_modules = [ {'module': 'json', 'mod_from': None, 'mod_as': None}, {'module': 'scipy', 'mod_from': None, 'mod_as': None}, {'module': 'joblib', 'mod_from': None, 'mod_as': None}, {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None} ] importer = importModule() utility.import_modules(importer, imported_modules) code = """ class inputprofiler(): """ init_code = """ def __init__(self): """ if params.get('text_features'): imported_modules.append({'module':'importlib.util'}) init_code += """ # preprocessing preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl' if not preprocess_path.exists(): raise ValueError(f'Preprocess model file not found: {preprocess_path}') self.profiler = joblib.load(preprocess_path) """ run_code = """ def run(self,df): df = df.replace(r'^\s*$', np.NaN, regex=True) """ if params.get('input_features_type'): imported_modules.append({'module':'dtype','mod_from':'numpy'}) run_code += f""" df = df.astype({params.get('input_features_type')}) """ if params.get('word2num_features'): imported_modules.append({'module':'w2n','mod_from':'word2number'}) run_code += f""" def s2n(value): try: x=eval(value) return x except: try: return w2n.word_to_num(value) except: return np.nan df[{params['word2num_features']}] = df[{params['word2num_features']}].apply(lambda x: s2n(x))""" if params.get('unpreprocessed_columns'): run_code += f""" unpreprocessed_data = df['{params['unpreprocessed_columns'][0]}'] df.drop(['{params['unpreprocessed_columns'][0]}'], axis=1,inplace=True) """ if params.get('force_numeric_conv'): run_code += f""" df[{params['force_numeric_conv']}] = df[{params['force_numeric_conv']}].apply(pd.to_numeric,errors='coerce')""" if params.get('conversion_method','').lower() == 'glove': code_text, modules = __profiler_glove_code(params) imported_modules.extend( modules) init_code += code_text elif params.get('conversion_method','').lower() == 'fasttext': init_code += __profiler_fasttext_code(params) run_code += __profiler_main_code(params) if params.get('unpreprocessed_columns'): run_code += f""" df['{params.get('unpreprocessed_columns')[0]}'] = unpreprocessed_data """ utility.import_modules(importer, imported_modules) import_code = importer.getCode() return import_code + code + init_code + run_code def __profiler_glove_code(params, indent=2): modules = [] modules.append({'module':'load_pretrained','mod_from':'text.Embedding'}) modules.append({'module':'TextProcessing','mod_from':'text'}) code = """ model_path = TextProcessing.checkAndDownloadPretrainedModel('glove') embed_size, pretrained_model = load_pretrained(model_path) self.profiler.set_params(text_process__vectorizer__external_model = pretrained_model) """ return code.replace('\n', '\n'+(indent * TAB_CHAR)), modules def __profiler_fasttext_code(params, indent=2): code = """ def get_pretrained_model_path(): try: from AION.appbe.dataPath import DATA_DIR modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing' except: modelsPath = Path('aion')/'PreTrainedModels'/'TextProcessing' if not modelsPath.exists(): modelsPath.mkdir(parents=True, exist_ok=True) return modelsPath if not importlib.util.find_spec('fasttext'): raise ValueError('fastText not installed') else: import os import fasttext import fasttext.util cwd = os.getcwd() os.chdir(get_pretrained_model_path()) fasttext.util.download_model('en', if_exists='ignore') pretrained_model = fasttext.load_model('cc.en.300.bin') 
os.chdir(cwd) self.profiler.set_params(text_process__vectorizer__external_model = pretrained_model) self.profiler.set_params(text_process__vectorizer__external_model_type = 'binary') """ return code.replace('\n', '\n'+(indent * TAB_CHAR)) def __profiler_main_code(params, indent=2): code = f""" df = self.profiler.transform(df) columns = {params['output_features']} if isinstance(df, scipy.sparse.spmatrix): df = pd.DataFrame(df.toarray(), columns=columns) else: df = pd.DataFrame(df, columns=columns) return df """ return code.replace('\n', '\n'+(indent * TAB_CHAR)) def feature_selector_code( params, indent=0): modules = [ {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'} ] code = """ class selector(): # this class def __init__(self): pass def run(self, df):""" code +=f""" return df[{params['output_features']}] """ return code, modules def feature_reducer_code( params, indent=0): modules = [ {'module': 'joblib', 'mod_from': None, 'mod_as': None}, {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None} ] code = f""" class selector(): def __init__(self): reducer_file = (Path(__file__).parent/"model")/"{params['reducer_file']}" if not reducer_file.exists(): raise ValueError(f'Failed to load Feature Engineering model file: {{reducer_file}}') self.model = joblib.load(reducer_file) def run(self, df): reducer_input = {params['input_features']} reducer_output = {params['output_features']} df = self.model.transform(df[reducer_input]) return pd.DataFrame(df,columns=reducer_output) """ if indent: code = code.replace('\n', '\n'+(indent * TAB_CHAR)) return code, modules def create_feature_list(config=None, target_feature=None, deploy_path=None): featurelist = [] if 'profiler' in config: if 'input_features_type' in config['profiler']: input_features = config['profiler']['input_features_type'] for x in input_features: featurelt={} featurelt['feature'] = x if x == target_feature: featurelt['Type'] = 'Target' else: if input_features[x] in ['int','int64','float','float64']: featurelt['Type'] = 'Numeric' elif input_features[x] == 'object': featurelt['Type'] = 'Text' elif input_features[x] == 'category': featurelt['Type'] = 'Category' else: featurelt['Type'] = 'Unknown' featurelist.append(featurelt) featurefile = f""" import json def getfeatures(): try: features = {featurelist} outputjson = {{"status":"SUCCESS","features":features}} output = json.dumps(outputjson) print("Features:",output) return(output) except Exception as e: output = {{"status":"FAIL","message":str(e).strip(\'"\')}} print("Features:",json.dumps(output)) return (json.dumps(output)) if __name__ == "__main__": output = getfeatures() """ with open( deploy_path/'featureslist.py', 'wb') as f: f.write( str(featurefile).encode('utf8')) def requirement_file(deploy_path,model,textFeatures,learner_type='ML'): modules = ['pandas','numpy','alibi','matplotlib','joblib','shap','ipython','category_encoders','scikit-learn','word2number','flask_restful','evidently','Flask-Cors'] requires = '' for mod in modules: requires += f"{mod}=={version(mod)}\n" if len(textFeatures) > 0: tmodules = ['spacy','nltk','textblob','demoji','beautifulsoup4','text-unidecode','pyspellchecker','contractions','protobuf'] for mod in tmodules: requires += f"{mod}=={version(mod)}\n" if model == 'Extreme Gradient Boosting (XGBoost)': mmodules = ['xgboost'] for mod in mmodules: requires += f"{mod}=={version(mod)}\n" if model == 'Light Gradient Boosting (LightGBM)': mmodules = ['lightgbm'] for mod in mmodules: requires += 
f"{mod}=={version(mod)}\n" if model == 'Categorical Boosting (CatBoost)': mmodules = ['catboost'] for mod in mmodules: requires += f"{mod}=={version(mod)}\n" if model.lower() == 'arima': mmodules = ['pmdarima'] for mod in mmodules: requires += f"{mod}=={version(mod)}\n" if model.lower() == 'fbprophet': mmodules = ['prophet'] for mod in mmodules: requires += f"{mod}=={version(mod)}\n" if model.lower() == 'lstm' or model.lower() == 'mlp' or learner_type =='DL': mmodules = ['tensorflow'] for mod in mmodules: requires += f"{mod}=={version(mod)}\n" if model.lower() in ['cox', 'kaplanmeierfitter']: #bug 12833 mmodules = ['lifelines'] for mod in mmodules: requires += f"{mod}=={version(mod)}\n" if model.lower() == 'sentencetransformer': #bug 12833 mmodules = ['sentence_transformers'] for mod in mmodules: requires += f"{mod}=={version(mod)}\n" with open( deploy_path/'requirements.txt', 'wb') as f: f.write(str(requires).encode('utf8')) def create_readme_file(deploy_path,modelfile,features): data = json.dumps([{x:x+'_value'} for x in features]) backslash_data = data.replace('"', '\\"') content = f""" ========== Files Structures ========== {modelfile} ------ Trained Model aion_prediction.py --> Python package entry point script/inputprofiler.py --> Profiling like FillNA and Category to Numeric ========== How to call the model ========== ============== From Windows Terminal ========== python aion_prediction.py "{backslash_data}" ============== From Linux Terminal ========== python aion_prediction.py "{data}" ============== Output ========== {{"status":"SUCCESS","data":[{{"Data1":"Value","prediction":"Value"}}]}} ## for single Row/Record {{"status":"SUCCESS","data":[{{"Data1":"Value","prediction":"Value"}},{{"Data1":"Value","prediction":"Value"}}]}} ## For Multiple Row/Record {{"status":"ERROR","message":"description"}} ## In Case Exception or Error """ filename = deploy_path/'readme.txt' with open(filename, 'w') as f: f.write(content) def create_util_folder(deploy_path): import tarfile ext_path = Path(__file__).parent.parent/'utilities' for x in ext_path.iterdir(): if x.suffix == '.tar': if x.name not in ['scikit_surprise-1.1.1.dist-info.tar','surprise.tar']: my_tar = tarfile.open(x) my_tar.extractall(deploy_path) my_tar.close()
model_deploy.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os,sys import platform import json import shutil import logging from pathlib import Path from prediction_package import production from prediction_package import prediction_transformation as cs class DeploymentManager: def __init__(self): self.requirementfile='' self.modelfile='' self.s2i_environmentfile='' self.selectorfile='' self.profilerfile='' self.readmepackagename='' self.pythonpackage='' self.log = logging.getLogger('eion') def include_import_file(self,learner_type,method,scoreParam,model_type,model): if((learner_type == 'DL') or (learner_type == 'TextDL')): self.modelfile += 'from tensorflow.keras.models import load_model' self.modelfile += '\n' self.modelfile += 'from tensorflow.keras import backend as K' self.modelfile += '\n' self.modelfile += 'import tensorflow as tf' self.modelfile += '\n' if (learner_type == 'ML' and model_type.lower()=='anomaly_detection' and model.lower() == 'autoencoder'): self.modelfile += 'import joblib' self.modelfile += '\n' self.modelfile += 'import os' self.modelfile += '\n' self.modelfile += 'import pandas as pd' self.modelfile += '\n' self.modelfile += 'import numpy as np' self.modelfile += '\n' self.modelfile += 'from pathlib import Path' self.modelfile += '\n' self.modelfile += 'import tensorflow as tf' self.modelfile += '\n' self.modelfile += 'from keras.models import load_model' self.modelfile += '\n' self.modelfile += 'import warnings' self.modelfile += '\n' self.modelfile += 'from sklearn.preprocessing import StandardScaler' self.modelfile += '\n' self.modelfile += 'warnings.filterwarnings("ignore")' self.modelfile += '\n' if(learner_type == 'ImageClassification'): self.modelfile += 'import os' self.modelfile += '\n' self.modelfile += 'from tensorflow.keras.models import Sequential' self.modelfile += '\n' self.modelfile += 'from tensorflow.keras.layers import Dense, Dropout, Flatten' self.modelfile += '\n' self.modelfile += 'from tensorflow.keras.preprocessing import image' self.modelfile += '\n' self.modelfile += 'import numpy as np' self.modelfile += '\n' self.modelfile += 'import tensorflow as tf' self.modelfile += '\n' self.modelfile += 'from tensorflow.keras.layers import Input' self.modelfile += '\n' self.modelfile += 'from tensorflow.keras.models import Model' self.modelfile += '\n' self.modelfile += 'from tensorflow.keras.optimizers import Adam' self.modelfile += '\n' self.modelfile += 'import cv2' self.modelfile += '\n' if(learner_type == 'objectDetection'): self.modelfile += 'import os\n' self.modelfile += 'from object_detection.utils import label_map_util\n' self.modelfile += 'from object_detection.utils import config_util\n' self.modelfile += 'from object_detection.utils import visualization_utils as viz_utils\n' self.modelfile += 'from object_detection.builders import model_builder\n' self.modelfile += 'import tensorflow as tf\n' self.modelfile += 'import numpy as np\n' self.modelfile += 'from PIL import Image\n' self.modelfile += 'import matplotlib.pyplot as plt\n' self.modelfile += 'import 
pandas as pd\n' self.modelfile += 'from pathlib import Path\n' if(learner_type == 'Text Similarity'): self.modelfile += 'from tensorflow.keras.models import load_model' self.modelfile += '\n' self.modelfile += 'from tensorflow.keras import backend as K' self.modelfile += '\n' self.modelfile += 'from tensorflow.keras.preprocessing.sequence import pad_sequences' self.modelfile += '\n' self.modelfile += 'from tensorflow.keras.preprocessing.text import Tokenizer' self.modelfile += '\n' self.modelfile += 'import tensorflow as tf' self.modelfile += '\n' if(model == 'Neural Architecture Search'): self.modelfile += 'from tensorflow.keras.models import load_model' self.modelfile += '\n' self.modelfile += 'from tensorflow.keras import backend as K' self.modelfile += '\n' self.modelfile += 'import tensorflow as tf' self.modelfile += '\n' self.modelfile += 'import joblib' self.modelfile += '\n' self.modelfile += 'import os' self.modelfile += '\n' self.modelfile += 'import pandas as pd' self.modelfile += '\n' self.modelfile += 'from sklearn.decomposition import LatentDirichletAllocation\n' self.modelfile += 'import numpy as np\n' self.modelfile += 'from pathlib import Path\n' if model.lower() == 'deep q network' or model.lower() == 'dueling deep q network': self.modelfile += 'from tensorflow import constant' self.modelfile += '\n' self.modelfile += 'from tf_agents.trajectories import time_step' self.modelfile += '\n' self.requirementfile += 'tensorflow==2.5.0' if model.lower() == 'lstm' or model.lower() == 'mlp': self.modelfile += 'from tensorflow.keras.models import load_model' self.modelfile += '\n' self.requirementfile += 'tensorflow==2.5.0' if(learner_type == 'Text Similarity'): self.modelfile += 'def cosine_distance(vests):' self.modelfile += '\n'; self.modelfile += ' x, y = vests' self.modelfile += '\n'; self.modelfile += ' x = K.l2_normalize(x, axis=-1)' self.modelfile += '\n'; self.modelfile += ' y = K.l2_normalize(y, axis=-1)' self.modelfile += '\n'; self.modelfile += ' return -K.mean(x * y, axis=-1, keepdims=True)' self.modelfile += '\n'; self.modelfile += 'def cos_dist_output_shape(shapes):' self.modelfile += '\n'; self.modelfile += ' shape1, shape2 = shapes' self.modelfile += '\n'; self.modelfile += ' return (shape1[0],1)' self.modelfile += '\n'; if(learner_type == 'TextDL' or learner_type == 'DL'): if(scoreParam.lower() == 'recall' or scoreParam.lower() == 'f1_score'): self.modelfile += 'def recall_m(y_true, y_pred):' self.modelfile += '\n'; self.modelfile += ' true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))' self.modelfile += '\n'; self.modelfile += ' possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))' self.modelfile += '\n'; self.modelfile += ' recall = true_positives / (possible_positives + K.epsilon())' self.modelfile += '\n'; self.modelfile += ' return recall' self.modelfile += '\n'; if(scoreParam.lower() == 'precision' or scoreParam.lower() == 'f1_score'): self.modelfile += 'def precision_m(y_true, y_pred):' self.modelfile += '\n'; self.modelfile += ' true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))' self.modelfile += '\n'; self.modelfile += ' predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))' self.modelfile += '\n'; self.modelfile += ' precision = true_positives / (predicted_positives + K.epsilon())' self.modelfile += '\n'; self.modelfile += ' return precision' self.modelfile += '\n'; if(scoreParam.lower() == 'f1_score'): self.modelfile += 'def f1_m(y_true, y_pred):' self.modelfile += '\n'; self.modelfile += ' precision = 
precision_m(y_true, y_pred)' self.modelfile += '\n'; self.modelfile += ' recall = recall_m(y_true, y_pred)' self.modelfile += '\n'; self.modelfile += ' return 2*((precision*recall)/(precision+recall+K.epsilon()))' self.modelfile += '\n'; if(scoreParam.lower() == 'rmse'): self.modelfile += 'def rmse_m(y_true, y_pred):' self.modelfile += '\n'; self.modelfile += ' return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))' self.modelfile += '\n'; if(scoreParam.lower() =='r2'): self.modelfile += 'def r_square(y_true, y_pred):' self.modelfile += '\n'; self.modelfile += ' SS_res = K.sum(K.square(y_true-y_pred))' self.modelfile += '\n'; self.modelfile += ' SS_tot = K.sum(K.square(y_true-K.mean(y_true)))' self.modelfile += '\n'; self.modelfile += ' return (1 - SS_res/(SS_tot+K.epsilon()))' self.modelfile += '\n'; if(learner_type.lower() in ['similarityidentification','contextualsearch']): self.modelfile += 'from pathlib import Path\n' if model_type == 'BM25': self.modelfile += 'from rank_bm25 import BM25Okapi\n' elif scoreParam == 'VectorDB Cosine': self.modelfile += 'import chromadb\n' else: self.modelfile += 'from sklearn.metrics.pairwise import cosine_similarity\n' self.pythonpackage += '========== Python Packags Requires =========' self.pythonpackage += '\n' self.pythonpackage += 'scikit-learn' self.pythonpackage += '\n' self.pythonpackage += 'scipy' self.pythonpackage += '\n' self.pythonpackage += 'numpy' self.pythonpackage += '\n' if((learner_type == 'DL') or (learner_type =='TextDL')): self.modelfile += 'import numpy as np' self.modelfile += '\n' self.requirementfile += 'scikit-learn==0.21.3' self.requirementfile += '\n' self.requirementfile += 'scipy==1.3.3' self.requirementfile += '\n' self.requirementfile += 'numpy==1.17.4' self.requirementfile += '\n' if(learner_type == 'TextML'): self.requirementfile += 'spacy==2.2.3' self.requirementfile += '\n' self.requirementfile += 'https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.2.0/en_core_web_sm-2.2.0.tar.gz' self.requirementfile += '\n' if(learner_type == 'DL' or learner_type == 'TextDL'): self.requirementfile += 'keras==2.3.1' self.requirementfile += '\n' self.requirementfile += 'tensorflow==2.0.0b1' self.requirementfile += '\n' if(learner_type == 'RecommenderSystem'): self.requirementfile += 'surprise' self.requirementfile += '\n' if(method == 'package'): self.modelfile += 'import surprise' self.modelfile += '\n' self.modelfile += 'import statsmodels' self.modelfile += '\n' self.requirementfile += 'statsmodels==0.10.2' self.requirementfile += '\n' def crate_readme_file(self,deploy_path,modelfile,features,method,single_file=False): self.readme='========== Files Structures ==========' self.readme+='\n' self.readme+=modelfile+' ------ Trained Model' self.readme+='\n' self.readme+='aion_prediction.py --> Python package entry point' self.readme+='\n' if not single_file: self.readme+='script/inputprofiler.py --> Profiling like FillNA and Category to Numeric' self.readme+='\n' self.readme+='script/selector.py --> Feature Selection' self.readme+='\n' self.readme+='script/trained_model.py --> Read the model file and call the prediction' self.readme+='\n' self.readme+='script/output_format.py --> Output formatter file' self.readme+='\n' self.readme+= self.pythonpackage self.readme+= '========== How to call the model ==========' self.readme+='\n' self.readme+= '============== From Windows Terminal ==========' self.readme+='\n' if method == 'optimus_package': self.readme += 'python aion_prediction.py filename.json' 
self.readme +='\n' self.readme += '========== Embedded Methods ==========' self.readme +='\n' self.readme += 'Function Name: predict_from_json - When input is Json Data' self.readme +='\n' self.readme += 'Function Name: predict_from_file - When input is Json File' self.readme +='\n' else: callpython = 'python aion_prediction.py "[{' for x in features: if(callpython != 'python prediction.py "[{'): callpython += ',' callpython += '\\\"'+str(x)+'\\\"'+':'+'\\\"'+str(x)+'_value'+'\\\"' callpython += '}]"' self.readme += callpython self.readme+='\n' self.readme+= '============== From Linux Terminal ==========' self.readme+='\n' callpython = 'python aion_prediction.py \'[{' temp =callpython for x in features: if(callpython != temp): callpython += ',' callpython += '"'+str(x)+'"'+':'+'"'+str(x)+'_value'+'"' callpython += '}]\'' self.readme += callpython self.readme+='\n' self.readme+= '============== Output ==========' self.readme+='\n' self.readme+= '{"status":"SUCCESS","data":[{"Data1":"Value","prediction":"Value"}]}' ## For Single Row/Record' self.readme+='\n' self.readme+= '{"status":"SUCCESS","data":[{"Data1":"Value","prediction":"Value"},{"Data1":"Value","prediction":"Value"}]} ## For Multiple Row/Record' self.readme+='\n' self.readme+= '{"status":"ERROR","message":"description"} ## In Case Exception or Error' self.readme+='\n' #print(self.readme) filename = os.path.join(deploy_path,'readme.txt') self.log.info('-------> Readme File Location: '+filename) f = open(filename, "wb") f.write(str(self.readme).encode('utf8')) f.close() def create_class(self,classname): #self.modelfile += 'class '+classname+'(object):' self.modelfile += 'class trained_model(object):' self.modelfile += '\n' def profiler_code(self,model_type,model,output_columns, features, text_feature,wordToNumericFeatures=[], deploy={},datetimeFeature=''): profiler = deploy.get('profiler',{}) if isinstance(features, str): features = features.split(',') code = f""" import scipy import joblib import numpy as np import pandas as pd from pathlib import Path """ if text_feature: code += """ import importlib.util\n""" if wordToNumericFeatures: code += """ from word2number import w2n def s2n(value): try: x=eval(value) return x except: try: return w2n.word_to_num(value) except: return np.nan """ if 'code' in deploy.get('preprocess',{}).keys(): code += deploy['preprocess']['code'] if profiler.get('conversion_method','').lower() == 'glove': code += """ class inputprofiler(object): def __init__(self): self.model = None preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl' if preprocess_path.exists(): self.model = joblib.load(preprocess_path) from text.Embedding import load_pretrained from text import TextProcessing model_path = TextProcessing.checkAndDownloadPretrainedModel('glove') embed_size, loaded_model = load_pretrained(model_path) self.model.set_params(text_process__vectorizer__external_model = loaded_model) else: raise ValueError('Preprocess model not found') def apply_profiler(self,df): df = df.replace(r'^\s*$', np.NaN, regex=True) """ elif profiler.get('conversion_method','').lower() == 'fasttext': code += """ def get_pretrained_model_path(): try: from AION.appbe.dataPath import DATA_DIR modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing' except: modelsPath = Path('aion')/'PreTrainedModels'/'TextProcessing' if not modelsPath.exists(): modelsPath.mkdir(parents=True, exist_ok=True) return modelsPath class inputprofiler(object): def __init__(self): self.model = None preprocess_path = 
Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl' if preprocess_path.exists(): self.model = joblib.load(preprocess_path) if not importlib.util.find_spec('fasttext'): raise ValueError('fastText not installed') else: import os import fasttext import fasttext.util cwd = os.getcwd() os.chdir(get_pretrained_model_path()) fasttext.util.download_model('en', if_exists='ignore') loaded_model = fasttext.load_model('cc.en.300.bin') os.chdir(cwd) self.model.set_params(text_process__vectorizer__external_model = loaded_model) self.model.set_params(text_process__vectorizer__external_model_type = 'binary') else: raise ValueError('Preprocess model not found') def apply_profiler(self,df): df = df.replace(r'^\s*$', np.NaN, regex=True) """ else: code += """ class inputprofiler(object): def __init__(self): self.model = None preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl' if preprocess_path.exists(): self.model = joblib.load(preprocess_path) else: raise ValueError('Preprocess model not found') def apply_profiler(self,df): df = df.replace(r'^\s*$', np.NaN, regex=True) """ if 'code' in deploy.get('preprocess',{}).keys(): code += " df = preprocess( df)\n" if wordToNumericFeatures: code += f""" df[{wordToNumericFeatures}] = df[{wordToNumericFeatures}].apply(lambda x: s2n(x))""" if profiler.get('unpreprocessed_columns'): code += f""" unpreprocessed_data = df['{profiler['unpreprocessed_columns'][0]}'] df.drop(['{profiler['unpreprocessed_columns'][0]}'], axis=1,inplace=True) """ if profiler.get('force_numeric_conv'): code += f""" df[{profiler['force_numeric_conv']}] = df[{profiler['force_numeric_conv']}].apply(pd.to_numeric,errors='coerce') """ code += f""" if self.model: df = self.model.transform(df)""" code += f""" columns = {output_columns} if isinstance(df, scipy.sparse.spmatrix): df = pd.DataFrame(df.toarray(), columns=columns) else: df = pd.DataFrame(df, columns=columns) """ ##The below if loop for avoiding unpreprocessed column variable storing which is not used for anomaly detection if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na': pass else: if profiler.get('unpreprocessed_columns'): code += f""" df['{profiler.get('unpreprocessed_columns')[0]}'] = unpreprocessed_data """ if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na': ##This below set_index is wrong, because we drop datetimefeature before profiling and doing set_index. So commented now. 
# code += f""" # df.set_index('{datetimeFeature}', inplace=True)""" code += f""" return(df,'{datetimeFeature}')\n""" else: code += f""" return(df)""" return code def no_profiling_code(self, features): if isinstance(features, str): features = features.split(',') return f""" import pandas as pd import numpy as np class inputprofiler(object): def apply_profiler(self,df): df = df.replace(r'^\s*$', np.NaN, regex=True) return df[{features}] """ def create_profiler_file(self,learner_type,deploy_path,profiler,features,numericToLabel_json,column_merge_flag,text_features,preprocessing_pipe,firstDocFeature,secondDocFeature,normalizer,normFeatures,wordToNumericFeatures,conversion_method,model_type,preprocess_pipe,preprocess_out_columns, label_encoder,model, config=None,datetimeFeature=''): filename = str(Path(deploy_path)/'script'/'inputprofiler.py') if 'profiler' in config: if model_type == 'BM25': code = self.profiler_code(model_type,model,['tokenize'],features, text_features,config['profiler']['word2num_features']) elif model == 'KaplanMeierFitter': code = self.no_profiling_code(features) elif model.lower() in ['arima', 'fbprophet']: #task 12627 code = self.no_profiling_code('noofforecasts') else: code = self.profiler_code(model_type,model,config['profiler']['output_features'],features, text_features,config['profiler']['word2num_features'],config,datetimeFeature) if code: with open(filename,'w',encoding="utf-8") as f: f.write(code) self.log.info('-------> Profiler File Location :'+filename) return self.profilerfile += 'import pandas as pd' self.profilerfile += '\n' self.profilerfile += 'import joblib' self.profilerfile += '\n' self.profilerfile += 'import os' self.profilerfile += '\n' self.profilerfile += 'from word2number import w2n' self.profilerfile += '\n' self.profilerfile += 'import numpy as np' self.profilerfile += '\nfrom pathlib import Path\n' #print("1") #print(profiler) if(learner_type == 'Text Similarity' or len(text_features) > 0): self.profilerfile += 'from text import TextProcessing' self.profilerfile += '\n' self.profilerfile += 'def textCleaning(textCorpus):' self.profilerfile += '\n' self.profilerfile += ' textProcessor = TextProcessing.TextProcessing()' self.profilerfile += '\n' self.profilerfile += ' textCorpus = textProcessor.transform(textCorpus)' self.profilerfile += '\n' self.profilerfile += ' return(textCorpus)' self.profilerfile += '\n' self.profilerfile += 'class inputprofiler(object):' self.profilerfile += '\n' self.profilerfile += ' def s2n(self,value):' self.profilerfile += '\n' self.profilerfile += ' try:' self.profilerfile += '\n' self.profilerfile += ' x=eval(value)' self.profilerfile += '\n' self.profilerfile += ' return x' self.profilerfile += '\n' self.profilerfile += ' except:' self.profilerfile += '\n' self.profilerfile += ' try:' self.profilerfile += '\n' self.profilerfile += ' return w2n.word_to_num(value)' self.profilerfile += '\n' self.profilerfile += ' except:' self.profilerfile += '\n' self.profilerfile += ' return np.nan ' self.profilerfile += '\n' self.profilerfile += ' def apply_profiler(self,df):' self.profilerfile += '\n' if(len(wordToNumericFeatures) > 0): for w2nFeature in wordToNumericFeatures: if w2nFeature not in features: continue self.profilerfile += " df['"+w2nFeature+"']=df['"+w2nFeature+"'].apply(lambda x: self.s2n(x))" self.profilerfile += '\n' self.profilerfile += " df = df.replace(r'^\s*$', np.NaN, regex=True)" self.profilerfile += '\n' self.profilerfile += ' try:' self.profilerfile += '\n' self.profilerfile += ' 
df.dropna(how="all",axis=1,inplace=True)' self.profilerfile += '\n' self.profilerfile += ' except:' self.profilerfile += '\n' self.profilerfile += ' df.fillna(0)' self.profilerfile += '\n' if model_type.lower() != 'timeseriesforecasting': #task 11997 self.profilerfile += ' preprocess_path = Path(__file__).parent.parent/"model"/"preprocess_pipe.pkl"\n' self.profilerfile += ' if preprocess_path.exists():\n' self.profilerfile += ' model = joblib.load(preprocess_path)\n' if model_type.lower()=='anomaly_detection' and model.lower() == 'autoencoder': self.profilerfile += f" df[{features}] = model.transform(df[{features}])\n" else: self.profilerfile += f" df = model.transform(df)\n" if 'operation' in profiler: y = profiler['operation'] for action in y: feature = action['feature'] #if feature not in features: # continue operation = action['Action'] if(operation == 'Drop'): self.profilerfile += " if '"+feature+"' in df.columns:" self.profilerfile += '\n' self.profilerfile += " df.drop(columns=['"+feature+"'],inplace = True)" self.profilerfile += '\n' if(operation == 'FillValue'): self.profilerfile += " if '"+feature+"' in df.columns:" self.profilerfile += '\n' fvalue = action['value'] self.profilerfile += " df['"+feature+"'] = df['"+feature+"'].fillna(value='"+fvalue+"')" self.profilerfile += '\n' if(operation == 'Encoder'): value = action['value'] value = value.replace("\n", "\\n") self.profilerfile += " if '"+feature+"' in df.columns:" self.profilerfile += '\n' self.profilerfile += " le_dict="+str(value) self.profilerfile += '\n' self.profilerfile += " df['"+feature+"'] = df['"+feature+"'].apply(lambda x: le_dict.get(x,-1))" self.profilerfile += '\n' self.profilerfile += " if -1 in df['"+feature+"'].values:" self.profilerfile += '\n' self.profilerfile += " raise Exception('Category value of "+feature+" not present in training data')" self.profilerfile += '\n' if 'conversion' in profiler: catergoryConverton = profiler['conversion'] #print(catergoryConverton) if (catergoryConverton['categoryEncoding'].lower() in ['targetencoding','onehotencoding']) and ('features' in catergoryConverton): self.profilerfile += " encoder = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','categoryEncoder.pkl'))" self.profilerfile += '\n' self.profilerfile += " CategoryFeatures = "+str(catergoryConverton['features']) self.profilerfile += '\n' if catergoryConverton['categoryEncoding'].lower() == 'onehotencoding': self.profilerfile += " transformed_data = encoder.transform(df[CategoryFeatures]).toarray()" self.profilerfile += '\n' self.profilerfile += " feature_labels = encoder.get_feature_names(CategoryFeatures)" self.profilerfile += '\n' self.profilerfile += " transformed_data = pd.DataFrame(transformed_data,columns=feature_labels) " self.profilerfile += '\n' else: self.profilerfile += " transformed_data = encoder.transform(df[CategoryFeatures])" self.profilerfile += '\n' self.profilerfile += " dataColumns=list(df.columns)" self.profilerfile += '\n' self.profilerfile += " nonNormFeatures=list(set(dataColumns) - set(CategoryFeatures))" self.profilerfile += '\n' self.profilerfile += " dataArray=df[nonNormFeatures]" self.profilerfile += '\n' self.profilerfile += " df = pd.concat([dataArray, transformed_data],axis=1)" self.profilerfile += '\n' y = json.loads(numericToLabel_json) for feature_details in y: feature = feature_details['feature'] if feature not in features: continue label = feature_details['Labels'] bins = feature_details['Bins'] self.profilerfile += " if '"+feature+"' in df.columns:" 
self.profilerfile += '\n' self.profilerfile += " cut_bins="+str(bins) self.profilerfile += '\n' self.profilerfile += " cut_labels="+str(label) self.profilerfile += '\n' self.profilerfile += " df['"+feature+"'] = pd.cut(df['"+feature+"'],bins=cut_bins,labels=cut_labels)" self.profilerfile += '\n' self.profilerfile += " df['"+feature+"'] = df['"+feature+"'].fillna(value=0)" self.profilerfile += '\n' if(len(text_features) > 0): if(len(text_features) > 1): self.profilerfile += ' merge_features = '+str(text_features) self.profilerfile += '\n' self.profilerfile += ' df[\'combined\'] = df[merge_features].apply(lambda row: \' \'.join(row.values.astype(str)), axis=1)' self.profilerfile += '\n' self.profilerfile += ' features = [\'combined\']' self.profilerfile += '\n' else: self.profilerfile += " features = "+str(text_features) self.profilerfile += '\n' if model_type == 'BM25': self.profilerfile += """\ df_text = df[features[0]] pipe = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','{preprocessing_pipe}')) df['tokenize'] = pipe.transform(df_text)\n""".format(preprocessing_pipe=preprocessing_pipe) elif conversion_method == 'sentenceTransformer': self.profilerfile += """\ df_text = df[features[0]] from sentence_transformers import SentenceTransformer model = SentenceTransformer(\'sentence-transformers/msmarco-distilroberta-base-v2\') df_vect = model.encode(df_text) for empCol in {text_features}: df = df.drop(columns=[empCol]) if isinstance(df_vect, np.ndarray): df1 = pd.DataFrame(df_vect) else: df1 = pd.DataFrame(df_vect.toarray(),columns = pipe.named_steps[\'vectorizer\'].get_feature_names()) df1 = df1.add_suffix(\'_vect\') df = pd.concat([df, df1],axis=1)\n""".format(preprocessing_pipe=preprocessing_pipe, text_features=text_features) else: self.profilerfile += """\ df_text = df[features[0]] pipe = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','{preprocessing_pipe}')) df_vect=pipe.transform(df_text) for empCol in {text_features}: df = df.drop(columns=[empCol]) if isinstance(df_vect, np.ndarray): df1 = pd.DataFrame(df_vect) else: df1 = pd.DataFrame(df_vect.toarray(),columns = pipe.named_steps[\'vectorizer\'].get_feature_names()) df1 = df1.add_suffix(\'_vect\') df = pd.concat([df, df1],axis=1)\n""".format(preprocessing_pipe=preprocessing_pipe, text_features=text_features) if(learner_type == 'Text Similarity'): self.profilerfile += ' df[\''+firstDocFeature+'\'] = textCleaning(df[\''+firstDocFeature+'\'])' self.profilerfile += '\n' self.profilerfile += ' df[\''+secondDocFeature+'\'] = textCleaning(df[\''+secondDocFeature+'\'])' self.profilerfile += '\n' if len(normFeatures) > 0 and normalizer != '': self.profilerfile += " normFeatures = "+str(normFeatures) self.profilerfile += '\n' self.profilerfile += ' normalizepipe = joblib.load(os.path.join(os.path.dirname(os.path.abspath(__file__)),\'..\',\'model\',\''+normalizer+'\'))' self.profilerfile += '\n' self.profilerfile += ' dataColumns=list(df.columns)' self.profilerfile += '\n' self.profilerfile += ' nonNormFeatures=list(set(dataColumns) - set(normFeatures))' self.profilerfile += '\n' self.profilerfile += ' dataframe=df[normFeatures]' self.profilerfile += '\n' self.profilerfile += ' transDf = normalizepipe.transform(dataframe)' self.profilerfile += '\n' self.profilerfile += ' nontransDF=df[nonNormFeatures].values' self.profilerfile += '\n' self.profilerfile += ' dataColumns=normFeatures+nonNormFeatures' self.profilerfile += '\n' self.profilerfile += ' scaledDf = pd.DataFrame(np.hstack((transDf, 
nontransDF)),columns=dataColumns)' self.profilerfile += '\n' self.profilerfile += ' df=scaledDf' self.profilerfile += '\n' else: self.profilerfile += ' df=df.dropna()\n' self.profilerfile += ' return(df)' filename = os.path.join(deploy_path,'script','inputprofiler.py') self.log.info('-------> Profiler File Location :'+filename) f = open(filename, "w",encoding="utf-8") f.write(str(self.profilerfile)) f.close() def isEnglish(self, s): try: s.encode(encoding='utf-8').decode('ascii') except UnicodeDecodeError: return False else: return True def create_selector_file(self,deploy_path,features,pcaModel_pickle_file,bpca_features,apca_features,textFeatures,nonNumericFeatures,numericalFeatures,profiler,targetFeature, model_type,model,config=None): cs.create_selector_file(self,deploy_path,features,pcaModel_pickle_file,bpca_features,apca_features,textFeatures,nonNumericFeatures,numericalFeatures,profiler,targetFeature, model_type,model,config) def create_init_function_for_regression(self,modelfile): self.modelfile += ' def __init__(self):' self.modelfile += '\n' self.modelfile += " self.model = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))" self.modelfile += '\n' def create_init_function_for_classification(self,modelfile,classes,learner_type,scoreParam,loss_matrix,optimizer,preprocessing_pipe,modelName,model_type,imageconfig): cs.create_init_function_for_classification(self,modelfile,classes,learner_type,scoreParam,loss_matrix,optimizer,preprocessing_pipe,modelName,model_type,imageconfig) def create_predict_proba(self,learner_type,method): self.modelfile += ' def predict(self,X,features_names):' self.modelfile += '\n' self.modelfile += ' return self.model.predict_proba(X)' def create_forcast(self,method,no_of_prediction): self.modelfile += ' def predict(self,X,features_names):' self.modelfile += '\n' self.modelfile += ' no_of_prediction = '+str(no_of_prediction) self.modelfile += '\n' self.modelfile += ' lag_order = self.model.k_ar' self.modelfile += '\n' self.modelfile += ' return self.model.forecast(X.values[-lag_order:],steps=no_of_prediction)' def create_predict(self,learner_type,method,model,model_type,threshold,firstDocFeature,secondDocFeature,padding_length,optimizationmethod,sessonal_freq,additional_regressors,feature,modelFeatures,indexFeature,lag_order,scalertransformationFile,datetimeFeature,scoreParam=None): scorePrm = scoreParam cs.create_predict(self,learner_type,method,model,model_type,threshold,firstDocFeature,secondDocFeature,padding_length,optimizationmethod,sessonal_freq,additional_regressors,feature,modelFeatures,indexFeature,lag_order,scalertransformationFile,datetimeFeature,scorePrm) def save_model_deploy(self,outputfolder,modelname): #filename = outputfolder+modelname+'.py' filename = os.path.join(outputfolder,'script','trained_model.py') self.log.info('-------> Model File Location :'+filename) f = open(filename, "w",encoding="utf-8") f.write(str(self.modelfile)) f.close() def create_TextCleaner(self,outputfolder): profilerPath = os.path.join(outputfolder,'profiler') try: os.makedirs(profilerPath) except OSError: self.log.info("ProfilePath Folder Already Exists") try: textprofileFileLocation = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','profiler','textDataProfiler.py') initFileLocation = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','profiler','__init__.py') shutil.copy2(textprofileFileLocation,profilerPath) shutil.copy2(initFileLocation,profilerPath) ''' if(platform.system() == 'Windows'): 
shutil.copy2(os.path.dirname(os.path.abspath(__file__))+'\\..\\profiler\\textDataProfiler.py',profilerPath) shutil.copy2(os.path.dirname(os.path.abspath(__file__))+'\\..\\profiler\\__init__.py',profilerPath) else: shutil.copy2(os.path.dirname(os.path.abspath(__file__))+'/../profiler/textDataProfiler.py',profilerPath) shutil.copy2(os.path.dirname(os.path.abspath(__file__))+'/../profiler/__init__.py',profilerPath) ''' except OSError: self.log.info("Copy to Profiler Path Failed") def listToString(self,s): str1='[' for feature in s: if(str1 != '['): str1 += ',' str1 += '"'+feature+'"' str1+=']' return str1 def print_files(self): self.log.info(self.modelfile) def create_util_folder(self, deploy_path,learner_type): import tarfile ext_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..','utilities')) for x in os.listdir(ext_path): if x.endswith('.tar'): if x not in ['scikit_surprise-1.1.1.dist-info.tar','surprise.tar']: tarPackage = os.path.join(ext_path, x) my_tar = tarfile.open(tarPackage) my_tar.extractall(deploy_path) my_tar.close() else: if learner_type == 'RecommenderSystem': tarPackage = os.path.join(ext_path, x) my_tar = tarfile.open(tarPackage) my_tar.extractall(deploy_path) my_tar.close() def deploy_model(self,deploy_name,deployJson,learner_type,model_type,model,scoreParam,saved_model,deploy_path,features,profiler,datalocation,output_label,column_merge_flag,textFeatures,numericalFeatures,nonNumericFeatures,preprocessing_pipe,numericToLabel_json,threshold,loss_matrix,optimizer,firstDocFeature,secondDocFeature,padding_length,trained_data_file,dictDiffCount,targetFeature,normalizer,normFeatures,pcaModel_pickle_file,bpca_features,apca_features,optimizationmethod,deployFolder,iterName,iterVersion,wordToNumericFeatures,imageconfig,sessonal_freq,additional_regressors,grouperbyjson,rowfilterexpression,xtrain,profiled_data_file,conversion_method,modelFeatures,indexFeature,lag_order,scalertransformationFile,no_of_prediction,preprocess_pipe,preprocess_out_columns, label_encoder,datetimeFeature,usecaseLocation,config=None): try: serviceName = '{}{}{}'.format(iterName, '_' if iterVersion != '' else '', iterVersion) self.log.info('-------> Deploy Location :'+deploy_path) if production.is_supported(model_type.lower()): if learner_type == 'Text Similarity': coder = production.get_deployer(learner_type) coder.create_file(deploy_path, preprocessing_pipe, saved_model, firstDocFeature, secondDocFeature) elif model_type.lower() in ['classification', 'regression','clustering','timeseriesforecasting']: params = {} params['usecase_name']= iterName params['usecase_ver']= iterVersion params['features']={} params['features']['input_feat'] = config['profiler']['input_features'] params['features']['target_feat'] = targetFeature params['features']['text_feat'] = textFeatures params['paths']={} params['paths']['deploy'] = Path(deploy_path) params['paths']['usecase'] = params['paths']['deploy'].parent params['profiler']=config['profiler'] if 'code' in config.get('preprocess',{}).keys(): params['profiler']['preprocess']=config['preprocess'] params['selector']={} params['selector']['reducer']=True if pcaModel_pickle_file else False params['selector']['reducer_file']=pcaModel_pickle_file if pcaModel_pickle_file: params['selector']['input_features']=bpca_features params['selector']['output_features']=apca_features else: params['selector']['input_features']=config['profiler']['input_features'] params['selector']['output_features']=features params['training']={} params['training']['algo']= model 
params['training']['model_file']=saved_model if model_type.lower() == 'timeseriesforecasting': if params['training']['algo'] in ['LSTM','MLP','ENCODER_DECODER_LSTM_MVI_UVO']: params['training']['lag_order'] = int(lag_order) params['training']['scaler_file'] = Path(scalertransformationFile).name elif params['training']['algo'] == 'VAR': params['training']['dictDiffCount'] = dictDiffCount params['training']['no_of_prediction'] = no_of_prediction elif params['training']['algo'] == 'FBPROPHET': params['training']['sessonal_freq'] = sessonal_freq params['training']['additional_regressors'] = additional_regressors self.log.info(params) deployer = production.get_deployer(model_type.lower(), params=params) deployer.run( ) self.log.info('Status:- |... Model deployment files created') self.log.info('Status:- |... Model deployment completed') return else: # for output_formatter.py from prediction_package.output_formatter import outputFormatter outputObj = outputFormatter() outputObj.crate_output_format_file(deploy_path, learner_type, model_type, model, output_label, threshold, trained_data_file, dictDiffCount, targetFeature, features,datetimeFeature) #for aion_predict.py from prediction_package.aion_prediction import aionPrediction predictionObj = aionPrediction() # print(deploy_path) predictionObj.create_prediction_file(deploy_name, deploy_path, learner_type, grouperbyjson,rowfilterexpression,model_type,datetimeFeature) # for aion_service.py predictionObj.create_model_service(deploy_path, serviceName, model_type) # for aion_publish.py predictionObj.create_publish_service(usecaseLocation, iterName, iterVersion, model_type) if learner_type.lower()=="recommendersystem": # Task 11190--- #For recommender system from prediction_package.recommender_code import generate_recommender_code generate_recommender_code(deploy_path) return #self.create_TextCleaner(deploy_path) if(len(textFeatures) > 0): self.create_TextCleaner(deploy_path) self.include_import_file(learner_type,deployJson['method'],scoreParam, model_type,model) if((learner_type == 'TS' and model.lower() not in ['lstm','mlp','var']) or learner_type == 'RecommenderSystem'): features=[] self.create_class(deploy_name) if len(bpca_features) != 0: self.create_profiler_file(learner_type,deploy_path,profiler,bpca_features,numericToLabel_json,column_merge_flag,textFeatures,preprocessing_pipe,firstDocFeature,secondDocFeature,normalizer,normFeatures,wordToNumericFeatures,conversion_method,model_type,preprocess_pipe,preprocess_out_columns, label_encoder, model, config,datetimeFeature) else: self.create_profiler_file(learner_type,deploy_path,profiler,features,numericToLabel_json,column_merge_flag,textFeatures,preprocessing_pipe,firstDocFeature,secondDocFeature,normalizer,normFeatures,wordToNumericFeatures,conversion_method,model_type,preprocess_pipe,preprocess_out_columns, label_encoder, model, config,datetimeFeature) self.create_selector_file(deploy_path,features,pcaModel_pickle_file,bpca_features,apca_features,textFeatures,nonNumericFeatures,numericalFeatures,profiler,targetFeature,model_type, model,config) self.create_init_function_for_classification(saved_model,'classes',learner_type,scoreParam,loss_matrix,optimizer,preprocessing_pipe,model,model_type,imageconfig) except Exception as e: print(e) import traceback exception_type, exception_object, exception_traceback = sys.exc_info() filename = exception_traceback.tb_frame.f_code.co_filename line_number = exception_traceback.tb_lineno self.log.info("Exception type: ", exception_type) self.log.info("File name: 
", filename) self.log.info("Line number: ", line_number) self.log.info("multivariate model build error traceback: \n"+str(traceback.print_exc())) raise Exception(e) #print(model) if(model.lower() == 'var'): self.log.info("Create Forecast Function") self.create_forcast(deployJson['method'],no_of_prediction) else: self.create_predict(learner_type,deployJson['method'],model,model_type,threshold,firstDocFeature,secondDocFeature,padding_length,optimizationmethod,sessonal_freq,additional_regressors,features,modelFeatures,indexFeature,lag_order,scalertransformationFile,datetimeFeature,scoreParam) self.save_model_deploy(deploy_path,deploy_name) if(len(textFeatures) > 0): if model_type.lower() == 'classification' or model_type.lower() == 'regression' or model_type.lower() == 'timeseriesforecasting': predictionObj.create_text_drift_file(deploy_path,textFeatures,targetFeature,model_type) if model_type.lower() == 'classification': predictionObj.create_classification_text_performance_file(deploy_path,textFeatures,targetFeature) elif model_type.lower() == 'regression': predictionObj.create_regression_text_performance_file(deploy_path,textFeatures,targetFeature) else: if model_type.lower() == 'classification' or model_type.lower() == 'regression' or model_type.lower() == 'timeseriesforecasting': #task 11997 predictionObj.create_drift_file(deploy_path,features,targetFeature,model_type) if model_type.lower() == 'classification': predictionObj.create_classification_performance_file(deploy_path,features,targetFeature) elif model_type.lower() == 'regression': predictionObj.create_regression_performance_file(deploy_path,features,targetFeature) self.log.info('Status:- |... Model deployment files created') self.crate_readme_file(deploy_path,saved_model,features,deployJson['method']) from prediction_package.requirements import requirementfile requirementfile(deploy_path,model,textFeatures,learner_type) os.chdir(deploy_path) textdata = False if(learner_type == 'Text Similarity' or len(textFeatures) > 0): textdata = True self.create_util_folder(deploy_path,learner_type) self.log.info('Status:- |... 
Model deployment completed') def deployTSum(self,deploy_path,preTrainedModellocation): def create_predict(preTrainedModellocation): text = f""" import sys import json def predict(data): try: import pandas as pd import numpy as np from pathlib import Path keywordsFile =Path(__file__).parent/'data'/'keywordDataBase.csv' outputSumFile =Path(__file__).parent/'data'/'summarizedOutput.csv' fileName=data #print("fileName---",fileName) inputDataFileFrame = pd.DataFrame() inputDataFileFrame['Sentences']="" rowIndex=0 if fileName.endswith(".pdf"): from pypdf import PdfReader reader = PdfReader(fileName) number_of_pages = len(reader.pages) text="" textOutputForFile="" OrgTextOutputForFile="" for i in range(number_of_pages) : page = reader.pages[i] text1 = page.extract_text() text=text+text1 import nltk tokens = nltk.sent_tokenize(text) for sentence in tokens: sentence=sentence.replace("\\n", " ") if (len(sentence.split()) < 4 ) or (len(str(sentence.split(',')).split()) < 8)or (any(chr.isdigit() for chr in sentence)) : continue inputDataFileFrame.at[rowIndex,'Sentences']=str(sentence.strip()) rowIndex=rowIndex+1 if fileName.endswith(".txt"): data=[] with open(fileName, "r",encoding="utf-8") as f: data.append(f.read()) str1 = "" for ele in data: str1 += ele sentences=str1.split(".") count=0 for sentence in sentences: count += 1 inputDataFileFrame.at[rowIndex,'Sentences']=str(sentence.strip()) rowIndex=rowIndex+1 inputDataFileFrame['LabelByKw']=0 #print(inputDataFileFrame) keywordsFileFrame=pd.read_csv(keywordsFile,encoding='utf-8') Keyword_list = keywordsFileFrame['Keyword'].tolist() for i in inputDataFileFrame.index: for x in Keyword_list: if (str(inputDataFileFrame["Sentences"][i])).lower().find(x) != -1: inputDataFileFrame['LabelByKw'][i]=1 break import pickle from sklearn.preprocessing import LabelEncoder pkl_filename='classificationModel.sav' pkl_filename =Path(__file__).parent/'model'/'classificationModel.sav' with open(pkl_filename, 'rb') as file: pickle_model = pickle.load(file) testsample=inputDataFileFrame[["Sentences"]] labelencoder = LabelEncoder() testsample["Sentences"] = labelencoder.fit_transform(testsample["Sentences"]) y_predicted = pickle_model.predict_proba(testsample) df=pd.DataFrame({{"SectionName":np.nan,"Sentences":np.nan, "Predicted_Prob":y_predicted[:,1]}}) df['LabelByModel']=df['Predicted_Prob'].apply(lambda x: 0 if x <= 0.5 else 1 ) inputDataFileFrame['LabelByModel']= df['LabelByModel'] textToSum="" for i in inputDataFileFrame.index: if (inputDataFileFrame['LabelByModel'][i] or inputDataFileFrame['LabelByKw'][i]) : textToSum=textToSum+" "+inputDataFileFrame["Sentences"][i] stdir=r"{preTrainedModellocation}" stdir = stdir.replace('\\\\', '\\\\\\\\') from transformers import AutoTokenizer, AutoModelForSeq2SeqLM modelbert = AutoModelForSeq2SeqLM.from_pretrained(stdir,local_files_only=True) tokenizer = AutoTokenizer.from_pretrained(stdir,local_files_only=True) inputs = tokenizer("summarize: " + textToSum, return_tensors="pt", max_length=512, truncation=True) outputs = modelbert.generate(inputs["input_ids"], max_length=512, min_length=140, length_penalty=2.0, num_beams=4, early_stopping=True) summarizedOutputOfSection= tokenizer.decode(outputs[0]) summarizedOutputOfSection=summarizedOutputOfSection.replace("</s>","") summarizedOutputOfSection=summarizedOutputOfSection.replace("<s>","") sumDatadata = [summarizedOutputOfSection] df = pd.DataFrame(sumDatadata, columns=['textSum']) df.to_csv(outputSumFile,encoding='utf-8') outputjson = {{"status":"SUCCESS","msg":"Press Download 
button to download summarized output","data":summarizedOutputOfSection}} print("predictions:",json.dumps(outputjson)) return (json.dumps(outputjson)) except KeyError as e: output = {{"status":"FAIL","message":str(e).strip('"')}} print("predictions:",json.dumps(output)) return (json.dumps(output)) except Exception as e: output = {{"status":"FAIL","message":str(e).strip('"')}} print("predictions:",json.dumps(output)) return (json.dumps(output)) if __name__ == "__main__": output = predict(sys.argv[1]) """ return text deploy_path = Path(deploy_path) aion_prediction = deploy_path/'aion_predict.py' with open(aion_prediction, 'w') as f: f.write(create_predict(preTrainedModellocation))
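# --- Hedged usage sketch (illustration only; not part of the deployer module above) ---
# deployTSum() writes an aion_predict.py into deploy_path whose predict() takes a
# .pdf or .txt file path and prints a JSON payload prefixed with "predictions:".
# The helper below shows one possible way to invoke that generated script from
# another process; the paths in the commented example are placeholders, not values
# taken from this codebase.
def run_generated_summarizer(deploy_path, document_path):
    """Run the generated aion_predict.py on a document and return its parsed JSON output."""
    import subprocess, sys, json
    from pathlib import Path
    script = Path(deploy_path) / 'aion_predict.py'
    completed = subprocess.run([sys.executable, str(script), str(document_path)],
                               capture_output=True, text=True)
    # The generated script prints 'predictions: <json>' on stdout.
    for line in completed.stdout.splitlines():
        if line.startswith('predictions:'):
            return json.loads(line[len('predictions:'):])
    return {'status': 'FAIL', 'message': 'no predictions line found in output'}
# Example (hypothetical paths):
# result = run_generated_summarizer('/tmp/usecase_deploy', '/tmp/report.pdf')
# print(result.get('status'), str(result.get('data', ''))[:200])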
recommender_code.py
#task 11190: Item based Recommender system---Usnish import os def generate_recommender_code(deployPath): code = """ import pandas as pd import numpy as np import os ITEMID = 'itemId' DATA_FOLDER = 'data' USER_ITEM_MATRIX = 'user_item_matrix.csv' ITEM_SIMILARITY_MATRIX = 'item_similarity_matrix.csv' RATING = 'rating' SIMILARITY_SCORE = 'similarity_score' class collaborative_filter(object): def __init__(self): self.matrix = pd.read_csv(os.path.join(os.path.dirname(__file__), '..', DATA_FOLDER, USER_ITEM_MATRIX),index_col=0) self.matrix.index.name = ITEMID self.item_similarity_cosine = pd.read_csv(os.path.join(os.path.dirname(__file__), '..', DATA_FOLDER, ITEM_SIMILARITY_MATRIX)) self.item_similarity_cosine.index.name = ITEMID self.item_similarity_cosine.columns.name = ITEMID def item_based_rec(self,picked_userid, number_of_recommendations,number_of_similar_items=5): import operator if not isinstance(picked_userid,str): picked_userid = str(picked_userid) if picked_userid not in self.matrix.columns: raise KeyError("UserID Does Not Exist") # Movies that the target user has not watched try: picked_userid_unwatched = pd.DataFrame(self.matrix[picked_userid].isna()).reset_index() picked_userid_unwatched = picked_userid_unwatched[picked_userid_unwatched[picked_userid] == True][ITEMID].values.tolist() # Movies that the target user has watched picked_userid_watched = pd.DataFrame(self.matrix[picked_userid].dropna(axis=0, how='all') \ .sort_values(ascending=False)) \ .reset_index() \ .rename(columns={picked_userid: 'rating'}) # Dictionary to save the unwatched movie and predicted rating pair rating_prediction = {} # Loop through unwatched movies for picked_movie in picked_userid_unwatched: if not isinstance(picked_movie,str): picked_movie = str(picked_movie) # Calculate the similarity score of the picked movie with other movies try: picked_movie_similarity_score = self.item_similarity_cosine[[picked_movie]].reset_index().rename( columns={picked_movie: SIMILARITY_SCORE}) # Rank the similarities between the picked user watched movie and the picked unwatched movie. picked_userid_watched_similarity = pd.merge(left=picked_userid_watched, right=picked_movie_similarity_score, on=ITEMID, how='inner') \ .sort_values(SIMILARITY_SCORE, ascending=False)[ :number_of_similar_items] # Calculate the predicted rating using weighted average of similarity scores and the ratings from picked user try: predicted_rating = round(np.average(picked_userid_watched_similarity[RATING],weights=picked_userid_watched_similarity[SIMILARITY_SCORE]), 6) except Exception as e: predicted_rating = 0 # Save the predicted rating in the dictionary rating_prediction[picked_movie] = predicted_rating except Exception as e: rating_prediction[picked_movie] = 0 # Return the top recommended movies return sorted(rating_prediction.items(), key=operator.itemgetter(1), reverse=True)[:number_of_recommendations] except Exception as e: print(e) raise KeyError(str(e)) def predict(self,X): predictions = [] for index,row in X.iterrows(): score = self.item_based_rec(int(row["uid"]),int(row["numberOfRecommendation"])) df = pd.DataFrame(score,columns=['ItemId','Ratings']) predictions.append(df) return predictions""" filename = os.path.join(deployPath, 'script', 'item_recommendation.py') # print(deploy_path) f = open(filename, "wb") f.write(str(code).encode('utf8')) f.close()
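# --- Hedged usage sketch (illustration only; not emitted by generate_recommender_code) ---
# generate_recommender_code() above only writes <deployPath>/script/item_recommendation.py.
# The deploy path below is a placeholder and the script/ folder is assumed to exist
# (it is normally created earlier in the deployment flow). The generated
# collaborative_filter.predict() expects a DataFrame with 'uid' and
# 'numberOfRecommendation' columns and also needs user_item_matrix.csv and
# item_similarity_matrix.csv under <deployPath>/data.
if __name__ == '__main__':
    demo_deploy_path = os.path.join('/tmp', 'aion_recommender_demo')  # hypothetical location
    os.makedirs(os.path.join(demo_deploy_path, 'script'), exist_ok=True)
    generate_recommender_code(demo_deploy_path)
    print('generated:', os.path.join(demo_deploy_path, 'script', 'item_recommendation.py'))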
aion_prediction.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import platform import json import shutil import logging class aionPrediction: def __init__(self): self.log = logging.getLogger('eion') def create_optimus_prediction_file (self,classname,deploy_path,learner_type): self.predictionFile = 'import warnings' self.predictionFile += '\n' self.predictionFile += 'warnings.filterwarnings("ignore")' self.predictionFile += '\n' self.predictionFile += 'import json' self.predictionFile += '\n' self.predictionFile += 'import os' self.predictionFile += '\n' self.predictionFile += 'import sys' self.predictionFile += '\n' self.predictionFile += 'import pandas as pd' self.predictionFile += '\n' self.predictionFile += 'from pandas import json_normalize' self.predictionFile += '\n' self.predictionFile += 'from importlib import import_module' self.predictionFile += '\n' self.predictionFile += 'import importlib.util' self.predictionFile += '\n' self.predictionFile += 'class prediction:' self.predictionFile += '\n' self.predictionFile += ' def predict_from_json(self,json_data):' self.predictionFile += '\n' self.predictionFile += ' data = json.loads(json_data)' self.predictionFile += '\n' self.predictionFile += ' output=self.predict(data)' self.predictionFile += '\n' self.predictionFile += ' print("predictions:",output)' self.predictionFile += '\n' self.predictionFile += '\n' self.predictionFile += ' def predict_from_file(self,filename):' self.predictionFile += '\n' self.predictionFile += ' with open(filename,\'r\',encoding=\'utf-8\') as f:' self.predictionFile += '\n' self.predictionFile += ' data = json.load(f)' self.predictionFile += '\n' self.predictionFile += ' output=self.predict(data)' self.predictionFile += '\n' self.predictionFile += ' print("predictions:",output)' self.predictionFile += '\n' self.predictionFile += '\n' self.predictionFile += ' def predict(self,json_data):' self.predictionFile += '\n' self.predictionFile += ' try:' self.predictionFile += '\n' #self.predictionFile += ' jsonData = json.loads(json_data)' self.predictionFile += ' jsonData=json_data' self.predictionFile += '\n' self.predictionFile += ' model_obj = importlib.util.spec_from_file_location("module.name", os.path.dirname(os.path.abspath(__file__))+"/trained_model.py")' self.predictionFile += '\n' self.predictionFile += ' model = importlib.util.module_from_spec(model_obj)' self.predictionFile += '\n' self.predictionFile += ' model_obj.loader.exec_module(model)' self.predictionFile += '\n' #if(learner_type != 'TextML'): self.predictionFile += ' profiler_obj = importlib.util.spec_from_file_location("module.name", os.path.dirname(os.path.abspath(__file__))+"/inputprofiler.py")' self.predictionFile += '\n' self.predictionFile += ' inputprofiler = importlib.util.module_from_spec(profiler_obj)' self.predictionFile += '\n' self.predictionFile += ' profiler_obj.loader.exec_module(inputprofiler)' self.predictionFile += '\n' self.predictionFile += ' selector_obj = importlib.util.spec_from_file_location("module.name", 
os.path.dirname(os.path.abspath(__file__))+"/selector.py")' self.predictionFile += '\n' self.predictionFile += ' selector = importlib.util.module_from_spec(selector_obj)' self.predictionFile += '\n' self.predictionFile += ' selector_obj.loader.exec_module(selector)' self.predictionFile += '\n' self.predictionFile += ' output_format_obj = importlib.util.spec_from_file_location("module.name", os.path.dirname(os.path.abspath(__file__))+"/output_format.py")' self.predictionFile += '\n' self.predictionFile += ' output_format = importlib.util.module_from_spec(output_format_obj)' self.predictionFile += '\n' self.predictionFile += ' output_format_obj.loader.exec_module(output_format)' self.predictionFile += '\n' self.predictionFile += ' df = json_normalize(jsonData)' self.predictionFile += '\n' self.predictionFile += ' df0 = df.copy()' self.predictionFile += '\n' #if(learner_type != 'TextML'): self.predictionFile += ' profilerobj = inputprofiler.inputprofiler()' self.predictionFile += '\n' self.predictionFile += ' df = profilerobj.apply_profiler(df)' self.predictionFile += '\n' self.predictionFile += ' selectobj = selector.selector()' self.predictionFile += '\n' self.predictionFile += ' df = selectobj.apply_selector(df)' self.predictionFile += '\n' self.predictionFile += ' output = model.trained_model().predict(df,"")' self.predictionFile += '\n' self.predictionFile += ' outputobj = output_format.output_format()' self.predictionFile += '\n' self.predictionFile += ' output = outputobj.apply_output_format(df0,output)' #self.predictionFile += '\n' #self.predictionFile += ' print(output)' self.predictionFile += '\n' self.predictionFile += ' return output' self.predictionFile += '\n' self.predictionFile += ' except KeyError as e:' self.predictionFile += '\n' self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}' self.predictionFile += '\n' self.predictionFile += ' return json.dumps(output)' self.predictionFile += '\n' self.predictionFile += ' except Exception as e:' self.predictionFile += '\n' self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}' self.predictionFile += '\n' self.predictionFile += ' return json.dumps(output)' self.predictionFile += '\n' self.predictionFile += '\n' self.predictionFile += 'if __name__ == "__main__":' self.predictionFile += '\n' self.predictionFile += ' predictobj = prediction()' self.predictionFile += '\n' self.predictionFile += ' predictobj.predict_from_file(sys.argv[1])' self.predictionFile += '\n' filename = os.path.join(deploy_path,'prediction.py') f = open(filename, "wb") f.write(str(self.predictionFile).encode('utf8')) f.close() def create_text_drift_file(self,deploy_path,features,target,model_type): #task-14549 self.predictionFile = 'import warnings' self.predictionFile += '\n' self.predictionFile += 'warnings.filterwarnings("ignore")' self.predictionFile += '\n' self.predictionFile += 'import json' self.predictionFile += '\n' self.predictionFile += 'import os' self.predictionFile += '\n' self.predictionFile += 'import sys' self.predictionFile += '\n' self.predictionFile += 'import pandas as pd' self.predictionFile += '\n' self.predictionFile += 'from monitoring import check_drift' self.predictionFile += '\n' self.predictionFile += 'def drift(data):' self.predictionFile += '\n' self.predictionFile += ' try:' self.predictionFile += '\n' self.predictionFile += ' if os.path.splitext(data)[1] == ".json":' self.predictionFile += '\n' self.predictionFile += ' with open(data,\'r\',encoding=\'utf-8\') as f:' 
self.predictionFile += '\n' self.predictionFile += ' jsonData = json.load(f)' self.predictionFile += '\n' self.predictionFile += ' else:' self.predictionFile += '\n' self.predictionFile += ' jsonData = json.loads(data)' self.predictionFile += '\n' self.predictionFile += ' jsonData[\'features\'] = \''+",".join([feature for feature in features])+'\'' self.predictionFile += '\n' self.predictionFile += ' jsonData[\'target\'] = \''+target+'\'' self.predictionFile += '\n' if model_type.lower() != 'timeseriesforecasting': #task 11997 self.predictionFile += ' htmlfilepath=evidently_details(jsonData)' self.predictionFile += '\n' else: self.predictionFile += ' htmlfilepath=\'\'' self.predictionFile += '\n' self.predictionFile += ' jsonData = json.dumps(jsonData)' self.predictionFile += '\n' self.predictionFile += ' output = check_drift(jsonData)' self.predictionFile += '\n' self.predictionFile += ' output = json.loads(output)' self.predictionFile += '\n' self.predictionFile += ' output[\'htmlPath\'] = str(htmlfilepath)' self.predictionFile += '\n' self.predictionFile += ' print("drift:", json.dumps(output))' self.predictionFile += '\n' self.predictionFile += ' return(output)' self.predictionFile += '\n' self.predictionFile += ' except KeyError as e:' self.predictionFile += '\n' self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}' self.predictionFile += '\n' self.predictionFile += ' print("drift:",json.dumps(output))' self.predictionFile += '\n' self.predictionFile += ' return (json.dumps(output))' self.predictionFile += '\n' self.predictionFile += ' except Exception as e:' self.predictionFile += '\n' self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}' self.predictionFile += '\n' self.predictionFile += ' print("drift:",json.dumps(output))' self.predictionFile += '\n' self.predictionFile += ' return (json.dumps(output))' self.predictionFile += '\n' if model_type.lower() != 'timeseriesforecasting': #task 11997 self.predictionFile += 'def evidently_details(deployJson):' self.predictionFile += '\n' self.predictionFile += ' features = deployJson[\'features\'].split(\',\')' self.predictionFile += '\n' self.predictionFile += ' target = deployJson[\'target\']' self.predictionFile += '\n' self.predictionFile += """\ try: from evidently.report import Report from evidently.metrics import TextDescriptorsDriftMetric, ColumnDriftMetric from evidently.pipeline.column_mapping import ColumnMapping from sklearn.preprocessing import LabelEncoder historicaldataFrame=pd.read_csv(deployJson['trainingDataLocation'],skipinitialspace = True,na_values=['-','?']) currentdataFrame=pd.read_csv(deployJson['currentDataLocation'],skipinitialspace = True,na_values=['-','?']) historicaldataFrame.columns = historicaldataFrame.columns.str.strip() currentdataFrame.columns = currentdataFrame.columns.str.strip() hdf = historicaldataFrame.dropna(subset=features) cdf = currentdataFrame.dropna(subset=features) hdf['Text_Features'] = hdf[features].apply("-".join, axis=1) cdf['Text_Features'] = cdf[features].apply("-".join, axis=1) hdf['target'] = historicaldataFrame[target] cdf['target'] = currentdataFrame[target] le = LabelEncoder() le.fit(hdf['target']) hdf['target'] = le.transform(hdf['target']) le.fit(cdf['target']) cdf['target'] = le.transform(cdf['target']) hd = hdf[['Text_Features', 'target']] cd = cdf[['Text_Features', 'target']] column_mapping = ColumnMapping() column_mapping.target = 'target' column_mapping.prediction = 'target' column_mapping.text_features = 
['Text_Features'] column_mapping.numerical_features = [] column_mapping.categorical_features = [] performance_report = Report(metrics=[ColumnDriftMetric('target'),TextDescriptorsDriftMetric(column_name='Text_Features')]) performance_report.run(reference_data=hd, current_data=cd,column_mapping=column_mapping) report = os.path.join(os.path.dirname(os.path.abspath(__file__)),"log","My_report.html") performance_report.save_html(report) return(report) except Exception as e: print('Error: ', e) return('NA')""" self.predictionFile += '\n' self.predictionFile += 'if __name__ == "__main__":' self.predictionFile += '\n' self.predictionFile += ' output = drift(sys.argv[1])' filename = os.path.join(deploy_path,'aion_ipdrift.py') f = open(filename, "wb") f.write(str(self.predictionFile).encode('utf8')) f.close() def create_drift_file(self,deploy_path,features,target,model_type): self.predictionFile = 'import warnings' self.predictionFile += '\n' self.predictionFile += 'warnings.filterwarnings("ignore")' self.predictionFile += '\n' self.predictionFile += 'import json' self.predictionFile += '\n' self.predictionFile += 'import os' self.predictionFile += '\n' self.predictionFile += 'import sys' self.predictionFile += '\n' self.predictionFile += 'import pandas as pd' self.predictionFile += '\n' self.predictionFile += 'from monitoring import check_drift' self.predictionFile += '\n' self.predictionFile += 'from pandas import json_normalize' self.predictionFile += '\n' self.predictionFile += 'from script.inputprofiler import inputprofiler' self.predictionFile += '\n' self.predictionFile += 'def drift(data):' self.predictionFile += '\n' self.predictionFile += ' try:' self.predictionFile += '\n' self.predictionFile += ' if os.path.splitext(data)[1] == ".json":' self.predictionFile += '\n' self.predictionFile += ' with open(data,\'r\',encoding=\'utf-8\') as f:' self.predictionFile += '\n' self.predictionFile += ' jsonData = json.load(f)' self.predictionFile += '\n' self.predictionFile += ' else:' self.predictionFile += '\n' self.predictionFile += ' jsonData = json.loads(data)' self.predictionFile += '\n' self.predictionFile += ' jsonData[\'features\'] = \''+",".join([feature for feature in features])+'\'' self.predictionFile += '\n' self.predictionFile += ' jsonData[\'target\'] = \''+target+'\'' self.predictionFile += '\n' if model_type.lower() != 'timeseriesforecasting': #task 11997 self.predictionFile += ' htmlfilepath=evidently_details(jsonData)' self.predictionFile += '\n' else: self.predictionFile += ' htmlfilepath=\'\'' self.predictionFile += '\n' self.predictionFile += ' jsonData = json.dumps(jsonData)' self.predictionFile += '\n' self.predictionFile += ' output = check_drift(jsonData)' self.predictionFile += '\n' self.predictionFile += ' output = json.loads(output)' self.predictionFile += '\n' self.predictionFile += ' output[\'htmlPath\'] = str(htmlfilepath)' self.predictionFile += '\n' self.predictionFile += ' output = json.dumps(output)' self.predictionFile += '\n' self.predictionFile += ' print("drift:",output)' self.predictionFile += '\n' self.predictionFile += ' return(output)' self.predictionFile += '\n' self.predictionFile += ' except KeyError as e:' self.predictionFile += '\n' self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}' self.predictionFile += '\n' self.predictionFile += ' print("drift:",json.dumps(output))' self.predictionFile += '\n' self.predictionFile += ' return (json.dumps(output))' self.predictionFile += '\n' self.predictionFile += ' except Exception 
as e:' self.predictionFile += '\n' self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}' self.predictionFile += '\n' self.predictionFile += ' print("drift:",json.dumps(output))' self.predictionFile += '\n' self.predictionFile += ' return (json.dumps(output))' self.predictionFile += '\n' if model_type.lower() != 'timeseriesforecasting': #task 11997 self.predictionFile += 'def evidently_details(deployJson):' self.predictionFile += '\n' self.predictionFile += ' features = deployJson[\'features\'].split(\',\')' self.predictionFile += '\n' self.predictionFile += ' target = deployJson[\'target\']' self.predictionFile += '\n' self.predictionFile += """\ try: from evidently.report import Report from evidently.metric_preset import DataDriftPreset historicaldataFrame=pd.read_csv(deployJson['trainingDataLocation'],skipinitialspace = True,na_values=['-','?']) currentdataFrame=pd.read_csv(deployJson['currentDataLocation'],skipinitialspace = True,na_values=['-','?']) historicaldataFrame.columns = historicaldataFrame.columns.str.strip() currentdataFrame.columns = currentdataFrame.columns.str.strip() profilerobj = inputprofiler() historicaldataFramep = profilerobj.run(historicaldataFrame) currentdataFramep = profilerobj.run(currentdataFrame) hdf = historicaldataFramep[features] cdf = currentdataFramep[features] hdf['target'] = historicaldataFrame[target] cdf['target'] = currentdataFrame[target] data_drift_report = Report(metrics = [DataDriftPreset()]) data_drift_report.run(reference_data=hdf,current_data=cdf,column_mapping = None) report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','my_report.html') data_drift_report.save_html(report) return(report) except Exception as e: print('Error') return('NA')""" self.predictionFile += '\n' self.predictionFile += 'if __name__ == "__main__":' self.predictionFile += '\n' self.predictionFile += ' output = drift(sys.argv[1])' filename = os.path.join(deploy_path,'aion_ipdrift.py') f = open(filename, "wb") f.write(str(self.predictionFile).encode('utf8')) f.close() def create_prediction_file(self,classname,deploy_path,learner_type,grouperbyjson,rowfilterexpression,model_type,datetimeFeature): self.predictionFile = 'import warnings' self.predictionFile += '\n' self.predictionFile += 'warnings.filterwarnings("ignore")' self.predictionFile += '\n' self.predictionFile += 'import json' self.predictionFile += '\n' self.predictionFile += 'import os' self.predictionFile += '\n' self.predictionFile += 'import sys' self.predictionFile += '\n' self.predictionFile += 'import pandas as pd' self.predictionFile += '\n' self.predictionFile += 'from pandas import json_normalize' self.predictionFile += '\n' if(learner_type.lower() != 'recommendersystem'): #task 11190 self.predictionFile += 'from script.selector import selector' self.predictionFile += '\n' self.predictionFile += 'from script.inputprofiler import inputprofiler' self.predictionFile += '\n' #self.predictionFile += 'from '+classname+' import '+classname self.predictionFile += 'from script.trained_model import trained_model' self.predictionFile += '\n' else: self.predictionFile += 'from script.item_recommendation import collaborative_filter' self.predictionFile += '\n' self.predictionFile += 'from script.output_format import output_format' self.predictionFile += '\n' if (learner_type != 'RecommenderSystem'): #task 11190 self.predictionFile += 'profilerobj = inputprofiler()' self.predictionFile += '\n' self.predictionFile += 'selectobj = selector()' self.predictionFile += '\n' 
self.predictionFile += 'modelobj = trained_model()' self.predictionFile += '\n' else: self.predictionFile += 'colabobj = collaborative_filter()' self.predictionFile += '\n' self.predictionFile += 'outputobj = output_format()' self.predictionFile += '\n' self.predictionFile += 'def predict(data):' self.predictionFile += '\n' self.predictionFile += ' try:' self.predictionFile += '\n' self.predictionFile += ' if os.path.splitext(data)[1] == ".tsv":' self.predictionFile += '\n' self.predictionFile += ' df=pd.read_csv(data,encoding=\'utf-8\',sep=\'\\t\',skipinitialspace = True,na_values=[\'-\',\'?\'])' self.predictionFile += '\n' self.predictionFile += ' elif os.path.splitext(data)[1] == ".csv":' self.predictionFile += '\n' self.predictionFile += ' df=pd.read_csv(data,encoding=\'utf-8\',skipinitialspace = True,na_values=[\'-\',\'?\'])' self.predictionFile += '\n' self.predictionFile += ' elif os.path.splitext(data)[1] == ".dat":' self.predictionFile += '\n' self.predictionFile += ' df=pd.read_csv(data,encoding=\'utf-8\',skipinitialspace = True,na_values=[\'-\',\'?\'])' self.predictionFile += '\n' self.predictionFile += ' else:' self.predictionFile += '\n' self.predictionFile += ' if os.path.splitext(data)[1] == ".json":' self.predictionFile += '\n' self.predictionFile += ' with open(data,\'r\',encoding=\'utf-8\') as f:' self.predictionFile += '\n' self.predictionFile += ' jsonData = json.load(f)' self.predictionFile += '\n' self.predictionFile += ' else:' self.predictionFile += '\n' self.predictionFile += ' jsonData = json.loads(data)' self.predictionFile += '\n' self.predictionFile += ' df = json_normalize(jsonData)' self.predictionFile += '\n' self.predictionFile += ' df.rename(columns=lambda x: x.strip(), inplace=True)' self.predictionFile += '\n' if str(rowfilterexpression) != '': self.predictionFile += ' filterexpression = "'+rowfilterexpression+'"' self.predictionFile += '\n' self.predictionFile += ' df = df.query(filterexpression)' self.predictionFile += '\n' #print(grouperbyjson) if str(grouperbyjson) != '': datetime = grouperbyjson['datetime'] unit = grouperbyjson['unit'] if unit == '': self.predictionFile += ' df[\'date\'] = pd.to_datetime(df[\''+datetime+'\'])' self.predictionFile += '\n' else: self.predictionFile += ' df[\'date\'] = pd.to_datetime(df[\''+datetime+'\'],unit=\''+unit+'\')' self.predictionFile += '\n' self.predictionFile += ' df = df.reset_index()' self.predictionFile += '\n' self.predictionFile += ' df.set_index(\'date\',inplace=True)' self.predictionFile += '\n' self.predictionFile += ' df = df.'+grouperbyjson['groupbystring'] self.predictionFile += '\n' self.predictionFile += ' df.columns = df.columns.droplevel(0)' self.predictionFile += '\n' self.predictionFile += ' df = df.reset_index()' self.predictionFile += '\n' self.predictionFile += ' df0 = df.copy()' self.predictionFile += '\n' if(learner_type != 'RecommenderSystem'): #task 11190 if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na': self.predictionFile += ' df,datetimeFeature = profilerobj.apply_profiler(df)' self.predictionFile += '\n' else: self.predictionFile += ' df = profilerobj.apply_profiler(df)' self.predictionFile += '\n' self.predictionFile += ' df = selectobj.apply_selector(df)' self.predictionFile += '\n' #self.predictionFile += ' modelobj = '+classname+'()' self.predictionFile += ' output = modelobj.predict(df,"")' self.predictionFile += '\n' else: self.predictionFile += ' output = colabobj.predict(df)' self.predictionFile += '\n' if 
model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na': self.predictionFile += ' output = outputobj.apply_output_format(df0,output,datetimeFeature)' self.predictionFile += '\n' else: self.predictionFile += ' output = outputobj.apply_output_format(df0,output)' self.predictionFile += '\n' self.predictionFile += ' print("predictions:",output)' self.predictionFile += '\n' self.predictionFile += ' return(output)' self.predictionFile += '\n' self.predictionFile += ' except KeyError as e:' self.predictionFile += '\n' self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}' self.predictionFile += '\n' self.predictionFile += ' print("predictions:",json.dumps(output))' self.predictionFile += '\n' self.predictionFile += ' return (json.dumps(output))' self.predictionFile += '\n' self.predictionFile += ' except Exception as e:' self.predictionFile += '\n' self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\'"\')}' self.predictionFile += '\n' self.predictionFile += ' print("predictions:",json.dumps(output))' self.predictionFile += '\n' self.predictionFile += ' return (json.dumps(output))' self.predictionFile += '\n' self.predictionFile += 'if __name__ == "__main__":' self.predictionFile += '\n' self.predictionFile += ' output = predict(sys.argv[1])' filename = os.path.join(deploy_path,'aion_predict.py') f = open(filename, "w") f.write(str(self.predictionFile)) f.close() def create_classification_text_performance_file(self,deploy_path,features,target): features = ",".join([feature for feature in features]) self.predictionFile = """\ import pandas as pd import warnings warnings.filterwarnings("ignore") import json import os import sys from pandas import json_normalize # from evidently.dashboard import Dashboard # from evidently.tabs import ClassificationPerformanceTab from evidently.pipeline.column_mapping import ColumnMapping from aion_predict import predict from evidently.report import Report from evidently.pipeline.column_mapping import ColumnMapping from evidently.metric_preset import ClassificationPreset def odrift(data): try: """ self.predictionFile += ' features = \''+features+'\'' self.predictionFile += '\n' self.predictionFile += ' target = \''+target+'\'' self.predictionFile += '\n' self.predictionFile +="""\ if os.path.splitext(data)[1] == ".json": with open(data,'r',encoding='utf-8') as f: jsonData = json.load(f) else: jsonData = json.loads(data) production = predict().run(jsonData['currentDataLocation']) reference = predict().run(jsonData['trainingDataLocation']) production = json.loads(production) reference = json.loads(reference) if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'): production = production['data'] production = json_normalize(production) reference = reference['data'] reference = json_normalize(reference) production['target'] = production[target] reference['target'] = reference[target] column_mapping = ColumnMapping() column_mapping.target = target column_mapping.prediction = 'prediction' column_mapping.datetime = None column_mapping.text_features = features.split(',') iris_model_performance_dashboard = Report(metrics=[ClassificationPreset()]) iris_model_performance_dashboard.run(reference_data=reference, current_data=production,column_mapping=column_mapping) report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html') iris_model_performance_dashboard.save_html(report) metrics_output = iris_model_performance_dashboard.as_dict() 
output = {"status":"SUCCESS","htmlPath":report, 'drift_details':metrics_output['metrics']} print("drift:",json.dumps(output)) return (json.dumps(output)) except KeyError as e: print(e) output = {"status":"FAIL","message":str(e).strip('"')} print("drift:",json.dumps(output)) return (json.dumps(output)) except Exception as e: print(e) output = {"status":"FAIL","message":str(e).strip('"')} print("drift:",json.dumps(output)) return (json.dumps(output)) if __name__ == "__main__": output = odrift(sys.argv[1])""" filename = os.path.join(deploy_path,'aion_opdrift.py') f = open(filename, "wb") f.write(str(self.predictionFile).encode('utf8')) f.close() def create_classification_performance_file(self,deploy_path,features,target): features = ",".join([feature for feature in features]) self.predictionFile = """\ import pandas as pd import warnings warnings.filterwarnings("ignore") import json import os import sys from pandas import json_normalize from evidently.report import Report from evidently.metric_preset import ClassificationPreset from evidently.pipeline.column_mapping import ColumnMapping from aion_predict import predict def odrift(data): try: """ self.predictionFile += ' features = \''+features+'\'' self.predictionFile += '\n' self.predictionFile += ' target = \''+target+'\'' self.predictionFile += '\n' self.predictionFile +="""\ if os.path.splitext(data)[1] == ".json": with open(data,'r',encoding='utf-8') as f: jsonData = json.load(f) else: jsonData = json.loads(data) production = predict().run(jsonData['currentDataLocation']) reference = predict().run(jsonData['trainingDataLocation']) production = json.loads(production) reference = json.loads(reference) if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'): production = production['data'] production = json_normalize(production) reference = reference['data'] reference = json_normalize(reference) production['target'] = production[target] reference['target'] = reference[target] column_mapping = ColumnMapping() column_mapping.target = target column_mapping.prediction = 'prediction' column_mapping.datetime = None column_mapping.numerical_features = features.split(',') model_performance_dashboard = Report(metrics = [ClassificationPreset()]) model_performance_dashboard.run(reference_data =reference, current_data =production, column_mapping = column_mapping) report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html') model_performance_dashboard.save_html(report) metrics_output = model_performance_dashboard.as_dict() output = {"status":"SUCCESS","htmlPath":report, 'drift_details':metrics_output['metrics']} print("drift:",json.dumps(output)) return (json.dumps(output)) else: output = {"status":"SUCCESS","htmlPath":'NA'} print("drift:",json.dumps(output)) return (json.dumps(output)) except KeyError as e: print(e) output = {"status":"FAIL","message":str(e).strip('"')} print("drift:",json.dumps(output)) return (json.dumps(output)) except Exception as e: print(e) output = {"status":"FAIL","message":str(e).strip('"')} print("drift:",json.dumps(output)) return (json.dumps(output)) if __name__ == "__main__": output = odrift(sys.argv[1])""" filename = os.path.join(deploy_path,'aion_opdrift.py') f = open(filename, "wb") f.write(str(self.predictionFile).encode('utf8')) f.close() def create_model_service(self,deploy_path,serviceName,problemType): filedata = """ from flask import Flask, jsonify, request from flask_restful import Resource, Api from aion_predict import predict""" if problemType.lower() == 
'classification' or problemType.lower() == 'regression': filedata += """ from aion_xai import local_analysis from aion_ipdrift import drift from aion_opdrift import odrift""" filedata += """ import json import os import pandas as pd import io import argparse from pathlib import Path from flask_cors import CORS, cross_origin app = Flask(__name__) #cross origin resource from system arguments parser = argparse.ArgumentParser() parser.add_argument('-ip', '--ipaddress', help='IP Address') parser.add_argument('-p', '--port', help='Port Number') parser.add_argument("-cors", type=str, required=False) d = vars(parser.parse_args()) modelPath = Path(__file__).parent try: with open( (modelPath/'etc')/'display.json', 'r') as f: disp_data = json.load(f) is_explainable = not disp_data.get('textFeatures') except: disp_data = {} is_explainable = True if "cors" in d.keys(): if d["cors"] != '' and d["cors"] != None: d["cors"] = [s.strip() for s in d["cors"].split(",")] #cors = CORS(app, resources={r"/AION/*": {"origins": ["http://localhost", "http://localhost:5000"]}}) cors = CORS(app, resources={r"/AION/*": {"origins": d["cors"]}}) api = Api(app) class predictapi(Resource): def get(self): features = disp_data.get('modelFeatures') if features: msg=\""" RequestType: POST Content-Type=application/json Body: {displaymsg} \""".format(displaymsg={ x:'Value' for x in features}) else: displaymsg='Data in JSON Format' return jsonify(displaymsg) def post(self): data = request.get_json() output = predict().run(json.dumps(data)) return jsonify(json.loads(output)) class predictfileapi(Resource): def post(self): if 'file' in request.files: file = request.files['file'] urlData = file.read() rawData = pd.read_csv(io.StringIO(urlData.decode('utf-8'))) data = rawData.to_json(orient='records') output = predict().run(data) return jsonify(json.loads(output)) else: displaymsg='File is mising' return jsonify(displaymsg) def get(self): msg=\""" RequestType: POST Body:send file content in body\""" return jsonify(msg) """ if problemType.lower() == 'classification' or problemType.lower() == 'regression': filedata += """ class explainapi(Resource): def get(self): features = disp_data.get('modelFeatures') if features: msg=\""" RequestType: POST Content-Type=application/json Body: {displaymsg} \""".format(displaymsg={ x:'Value' for x in features}) else: displaymsg='Data in JSON Format' return jsonify(displaymsg) def post(self): data = request.get_json() if is_explainable: output = local_analysis(json.dumps(data)) else: output = json.dumps({"status":"FAIL","data":"explain api is not supported when text features are used for training"}) return jsonify(json.loads(output)) class monitoringapi(Resource): def get(self): return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'}) def post(self): data = request.get_json() output = drift(json.dumps(data)) return jsonify(json.loads(output)) class performanceapi(Resource): def get(self): return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'}) def post(self): data = request.get_json() output = odrift(json.dumps(data)) return jsonify(json.loads(output)) """ filedata += """ api.add_resource(predictapi, '/AION/{serviceName}/predict')""".format(serviceName=serviceName) filedata += """ api.add_resource(predictfileapi, '/AION/{serviceName}/predict_file')""".format(serviceName=serviceName) if problemType.lower() == 'classification' or problemType.lower() == 'regression': filedata += """ 
api.add_resource(explainapi, '/AION/{serviceName}/explain') api.add_resource(monitoringapi, '/AION/{serviceName}/monitoring') api.add_resource(performanceapi, '/AION/{serviceName}/performance')""".format(serviceName=serviceName) filedata += """ if __name__ == '__main__': args = parser.parse_args() app.run(args.ipaddress,port = args.port,debug = True)""" filename = os.path.join(deploy_path,'aion_service.py') f = open(filename, "wb") f.write(str(filedata).encode('utf8')) f.close() def create_regression_performance_file(self,deploy_path,features,target): features = ",".join([feature for feature in features]) self.predictionFile = """\ import pandas as pd import warnings warnings.filterwarnings("ignore") import json import os import sys from pandas import json_normalize from evidently.report import Report from evidently.metric_preset import RegressionPreset from evidently.pipeline.column_mapping import ColumnMapping from aion_predict import predict def odrift(data): try: """ self.predictionFile += ' features = \''+features+'\'' self.predictionFile += '\n' self.predictionFile += ' target = \''+target+'\'' self.predictionFile += '\n' self.predictionFile +="""\ if os.path.splitext(data)[1] == ".json": with open(data,'r',encoding='utf-8') as f: jsonData = json.load(f) else: jsonData = json.loads(data) production = predict().run(jsonData['currentDataLocation']) reference = predict().run(jsonData['trainingDataLocation']) production = json.loads(production) reference = json.loads(reference) if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'): production = production['data'] production = json_normalize(production) reference = reference['data'] reference = json_normalize(reference) production['target'] = production[target] reference['target'] = reference[target] column_mapping = ColumnMapping() column_mapping.target = target column_mapping.prediction = 'prediction' column_mapping.datetime = None column_mapping.numerical_features = features.split(',') iris_model_performance_dashboard = Report(metrics=[RegressionPreset()]) iris_model_performance_dashboard.run(reference_data = reference, current_data = production, column_mapping = column_mapping) report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html') iris_model_performance_dashboard.save_html(report) metrics_output = iris_model_performance_dashboard.as_dict() output = {"status":"SUCCESS","htmlPath":report, 'drift_details':metrics_output['metrics']} print("drift:",json.dumps(output)) return (json.dumps(output)) else: output = {"status":"SUCCESS","htmlPath":'NA'} print("drift:",json.dumps(output)) return (json.dumps(output)) except KeyError as e: print(e) output = {"status":"FAIL","message":str(e).strip('"')} print("drift:",json.dumps(output)) return (json.dumps(output)) except Exception as e: print(e) output = {"status":"FAIL","message":str(e).strip('"')} print("drift:",json.dumps(output)) return (json.dumps(output)) if __name__ == "__main__": output = odrift(sys.argv[1])""" filename = os.path.join(deploy_path,'aion_opdrift.py') f = open(filename, "wb") f.write(str(self.predictionFile).encode('utf8')) f.close() def create_regression_text_performance_file(self,deploy_path,features,target): features = ",".join([feature for feature in features]) self.predictionFile = """\ import pandas as pd import warnings warnings.filterwarnings("ignore") import json import os import sys from pandas import json_normalize from aion_predict import predict from evidently.report import Report from 
evidently.pipeline.column_mapping import ColumnMapping from evidently.metric_preset import RegressionPreset def odrift(data): try: """ self.predictionFile += ' features = \''+features+'\'' self.predictionFile += '\n' self.predictionFile += ' target = \''+target+'\'' self.predictionFile += '\n' self.predictionFile +="""\ if os.path.splitext(data)[1] == ".json": with open(data,'r',encoding='utf-8') as f: jsonData = json.load(f) else: jsonData = json.loads(data) production = predict().run(jsonData['currentDataLocation']) reference = predict().run(jsonData['trainingDataLocation']) production = json.loads(production) reference = json.loads(reference) if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'): production = production['data'] production = json_normalize(production) reference = reference['data'] reference = json_normalize(reference) production['target'] = production[target] reference['target'] = reference[target] column_mapping = ColumnMapping() column_mapping.target = target column_mapping.prediction = 'prediction' column_mapping.datetime = None column_mapping.numerical_features = features.split(',') iris_model_performance_dashboard = Report(metrics=[RegressionPreset()]) iris_model_performance_dashboard.run(reference_data=reference, current_data=production,column_mapping=column_mapping) report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html') iris_model_performance_dashboard.save_html(report) metrics_output = iris_model_performance_dashboard.as_dict() output = {"status":"SUCCESS","htmlPath":report, 'drift_details':metrics_output['metrics']} print("drift:",json.dumps(output)) return (json.dumps(output)) else: output = {"status":"SUCCESS","htmlPath":'NA'} print("drift:",json.dumps(output)) return (json.dumps(output)) except KeyError as e: print(e) output = {"status":"FAIL","message":str(e).strip('"')} print("drift:",json.dumps(output)) return (json.dumps(output)) except Exception as e: print(e) output = {"status":"FAIL","message":str(e).strip('"')} print("drift:",json.dumps(output)) return (json.dumps(output)) if __name__ == "__main__": output = odrift(sys.argv[1])""" filename = os.path.join(deploy_path,'aion_opdrift.py') f = open(filename, "wb") f.write(str(self.predictionFile).encode('utf8')) f.close() def create_publish_service(self,datalocation,usecaseid,version,problemType): filename = os.path.join(datalocation,'aion_publish_service.py') if not os.path.exists(filename): filedata = """ import sys import json import time import sqlite3 import argparse import pandas as pd import io from pathlib import Path from datetime import datetime filename = Path(__file__).parent/'config.json' with open (filename, "r") as f: data = json.loads(f.read()) modelVersion = str(data['version']) modelPath = Path(__file__).parent/modelVersion sys.path.append(str(modelPath)) try: with open( (modelPath/'etc')/'display.json', 'r') as f: disp_data = json.load(f) is_explainable = not disp_data.get('textFeatures') except: disp_data = {} is_explainable = True from flask import Flask, jsonify, request from flask_restful import Resource, Api from flask_cors import CORS, cross_origin from flask import Response from aion_predict import predict """ if problemType.lower() == 'classification' or problemType.lower() == 'regression': filedata += """ from aion_ipdrift import drift from aion_opdrift import odrift if is_explainable: from aion_xai import local_analysis """ filedata += """ dataPath = Path(__file__).parent/'data' dataPath.mkdir(parents=True, 
exist_ok=True) app = Flask(__name__) #cross origin resource from system arguments parser = argparse.ArgumentParser() parser.add_argument('-ip', '--ipaddress', help='IP Address') parser.add_argument('-p', '--port', help='Port Number') parser.add_argument("-cors", type=str, required=False) d = vars(parser.parse_args()) if "cors" in d.keys(): if d["cors"] != '' and d["cors"] != None: d["cors"] = [s.strip() for s in d["cors"].split(",")] #cors = CORS(app, resources={r"/AION/*": {"origins": ["http://localhost", "http://localhost:5000"]}}) cors = CORS(app, resources={r"/AION/*": {"origins": d["cors"]}}) api = Api(app) class sqlite_db(): def __init__(self, location, database_file=None): if not isinstance(location, Path): location = Path(location) if database_file: self.database_name = database_file else: self.database_name = location.stem + '.db' db_file = str(location/self.database_name) self.conn = sqlite3.connect(db_file) self.cursor = self.conn.cursor() self.tables = [] def table_exists(self, name): if name in self.tables: return True elif name: query = f"SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';" listOfTables = self.cursor.execute(query).fetchall() if len(listOfTables) > 0 : self.tables.append(name) return True return False def read(self, table_name,condition=''): if condition == '': return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn) else: return pd.read_sql_query(f"SELECT * FROM {table_name} WHERE {condition}", self.conn) def create_table(self,name, columns, dtypes): query = f'CREATE TABLE IF NOT EXISTS {name} (' for column, data_type in zip(columns, dtypes): query += f"'{column}' TEXT," query = query[:-1] query += ');' self.conn.execute(query) return True def update(self,table_name,updates,condition): update_query = f'UPDATE {table_name} SET {updates} WHERE {condition}' self.cursor.execute(update_query) self.conn.commit() return True def write(self,data, table_name): if not self.table_exists(table_name): self.create_table(table_name, data.columns, data.dtypes) tuple_data = list(data.itertuples(index=False, name=None)) insert_query = f'INSERT INTO {table_name} VALUES(' for i in range(len(data.columns)): insert_query += '?,' insert_query = insert_query[:-1] + ')' self.cursor.executemany(insert_query, tuple_data) self.conn.commit() return True def delete(self, name): pass def close(self): self.conn.close()""" filedata += """ app = Flask(__name__) api = Api(app) class predictapi(Resource): def get(self): features = disp_data.get('modelFeatures') if features: msg=\""" RequestType: POST Content-Type=application/json Body: {displaymsg} \""".format(displaymsg={ x:'Value' for x in features}) else: displaymsg='Data in JSON Format' return jsonify(displaymsg) def post(self): sqlite_dbObj = sqlite_db(dataPath,'data.db') if not sqlite_dbObj.table_exists('metrices'): data = {'noOfPredictCalls':'0','noOfDriftCalls':'0',"noOfActualCalls":'0',"mid":'0'} data = pd.DataFrame(data, index=[0]) sqlite_dbObj.create_table('metrices',data.columns, data.dtypes) data = request.get_json() output = predict().run(json.dumps(data)) outputobj = json.loads(output) if outputobj['status'] == 'SUCCESS': try: df2 = pd.read_json(json.dumps(outputobj['data']), orient ='records') if not sqlite_dbObj.table_exists('prodData'): sqlite_dbObj.create_table('prodData',df2.columns, df2.dtypes) sqlite_dbObj.write(df2,'prodData') except: pass try: data = sqlite_dbObj.read('metrices') #print(data) if len(data) == 0: data = 
[{'mid':'0','noOfPredictCalls':'1','noOfDriftCalls':'0',"noOfActualCalls":'0'}] data = pd.read_json(json.dumps(data), orient ='records') sqlite_dbObj.write(data,'metrices') else: noofPredictCalls = int(data['noOfPredictCalls'].iloc[0])+1 sqlite_dbObj.update('metrices',"noOfPredictCalls = '"+str(noofPredictCalls)+"'","mid = 0") except Exception as e: print(e) pass return jsonify(json.loads(output)) class predictfileapi(Resource): def post(self): sqlite_dbObj = sqlite_db(dataPath,'data.db') if not sqlite_dbObj.table_exists('metrices'): data = {'noOfPredictCalls':'0','noOfDriftCalls':'0',"noOfActualCalls":'0',"mid":'0'} data = pd.DataFrame(data, index=[0]) sqlite_dbObj.create_table('metrices',data.columns, data.dtypes) if 'file' in request.files: file = request.files['file'] urlData = file.read() rawData = pd.read_csv(io.StringIO(urlData.decode('utf-8'))) data = rawData.to_json(orient='records') output = predict().run(data) outputobj = json.loads(output) if outputobj['status'] == 'SUCCESS': try: df2 = pd.read_json(json.dumps(outputobj['data']), orient ='records') if not sqlite_dbObj.table_exists('prodData'): sqlite_dbObj.create_table('prodData',df2.columns, df2.dtypes) sqlite_dbObj.write(df2,'prodData') except: pass try: data = sqlite_dbObj.read('metrices') #print(data) if len(data) == 0: data = [{'mid':'0','noOfPredictCalls':'1','noOfDriftCalls':'0',"noOfActualCalls":'0'}] data = pd.read_json(json.dumps(data), orient ='records') sqlite_dbObj.write(data,'metrices') else: noofPredictCalls = int(data['noOfPredictCalls'].iloc[0])+1 sqlite_dbObj.update('metrices',"noOfPredictCalls = '"+str(noofPredictCalls)+"'","mid = 0") except Exception as e: print(e) pass return jsonify(json.loads(output)) else: output = {'status':'error','msg':'File is missing'} return jsonify(output) """ if problemType.lower() == 'classification' or problemType.lower() == 'regression': filedata += """ class explainapi(Resource): def get(self): features = disp_data.get('modelFeatures') if features: msg=\""" RequestType: POST Content-Type=application/json Body: {displaymsg} \""".format(displaymsg={ x:'Value' for x in features}) else: displaymsg='Data in JSON Format' return jsonify(displaymsg) def post(self): data = request.get_json() if is_explainable: output = local_analysis(json.dumps(data)) else: output = json.dumps({"status":"FAIL","data":"explain api is not supported when text features are used for training"}) return jsonify(json.loads(output)) class monitoringapi(Resource): def get(self): return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'}) def post(self): sqlite_dbObj = sqlite_db(dataPath,'data.db') if not sqlite_dbObj.table_exists('monitoring'): data = {'status':'No Drift','Msg':'No Input Drift Found','RecordTime':'Time','version':'1'} data = pd.DataFrame(data, index=[0]) sqlite_dbObj.create_table('monitoring',data.columns, data.dtypes) trainingDataPath = (modelPath/'data')/'preprocesseddata.csv.gz' if not sqlite_dbObj.table_exists('prodData'): return jsonify({'status':'Error','msg':'Prod data not available'}) data = sqlite_dbObj.read('prodData') filetimestamp = str(int(time.time())) dataFile = dataPath/('AION_' + filetimestamp+'.csv') data.to_csv(dataFile, index=False) data = request.get_json() data={'trainingDataLocation':trainingDataPath,'currentDataLocation':dataFile} output = drift(json.dumps(data)) outputData = json.loads(output) status = outputData['status'] if status == 'SUCCESS': Msg = str(outputData['data']) else: Msg = 'Error during drift analysis' now 
= datetime.now() # current date and time date_time = now.strftime("%m/%d/%Y, %H:%M:%S") data = {'status':status,'Msg':Msg,'RecordTime':date_time,'version':modelVersion} data = pd.DataFrame(data, index=[0]) sqlite_dbObj.write(data,'monitoring') return jsonify(json.loads(output))""" filedata += """ class matricesapi(Resource): def get(self): sqlite_dbObj = sqlite_db(dataPath,'data.db') if sqlite_dbObj.table_exists('metrices'): df1 = sqlite_dbObj.read('metrices') else: df1 = pd.DataFrame() #print(df1) if sqlite_dbObj.table_exists('monitoring'): df2 = sqlite_dbObj.read('monitoring') else: df2 = pd.DataFrame() msg = {'Deployed Version':str(modelVersion)} if df1.shape[0] > 0: msg.update({'noOfPredictCalls':str(df1['noOfPredictCalls'].iloc[0])}) else: msg.update({'noOfPredictCalls':'0'}) driftDetails = [] for idx in reversed(df2.index): driftd = {'version':str(df2.version[idx]),'status':str(df2.status[idx]),'recordTime':str(df2.RecordTime[idx]),'msg':str(df2.Msg[idx])} driftDetails.append(driftd) msg.update({'driftDetails':driftDetails}) return jsonify(msg) class performanceapi(Resource): def get(self): return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'}) def post(self): sqlite_dbObj = sqlite_db(dataPath,'data.db') if not sqlite_dbObj.table_exists('monitoring'): data = {'status':'No Drift','Msg':'No Input Drift Found','RecordTime':'Time','version':'1'} data = pd.DataFrame(data, index=[0]) sqlite_dbObj.create_table('monitoring',data.columns, data.dtypes) trainingDataPath = (modelPath/'data')/'preprocesseddata.csv.gz' if not sqlite_dbObj.table_exists('prodData'): return jsonify({'status':'Error','msg':'Prod data not available'}) data = sqlite_dbObj.read('prodData') filetimestamp = str(int(time.time())) dataFile = dataPath/('AION_' + filetimestamp+'.csv') data.to_csv(dataFile, index=False) data = request.get_json() data={'trainingDataLocation':trainingDataPath,'currentDataLocation':dataFile} output = odrift(json.dumps(data)) return jsonify(json.loads(output)) """ filedata += """ api.add_resource(predictapi, '/AION/{serviceName}/predict') api.add_resource(predictfileapi, '/AION/{serviceName}/predict_file') api.add_resource(matricesapi, '/AION/{serviceName}/metrices')""".format(serviceName=usecaseid) if problemType.lower() == 'classification' or problemType.lower() == 'regression': filedata += """ api.add_resource(explainapi, '/AION/{serviceName}/explain') api.add_resource(monitoringapi, '/AION/{serviceName}/monitoring') api.add_resource(performanceapi, '/AION/{serviceName}/performance') """.format(serviceName=usecaseid) filedata += """ if __name__ == '__main__': args = parser.parse_args() app.run(args.ipaddress,port = args.port,debug = True)""" f = open(filename, "wb") f.write(str(filedata).encode('utf8')) f.close() data = {'version':version} filename = os.path.join(datalocation,'config.json') with open(filename, "w") as outfile: json.dump(data, outfile) outfile.close()
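The generated aion_publish_service.py exposes prediction, metrics and drift monitoring as a small flask_restful API rooted at /AION/<usecaseid>/. A minimal client sketch follows, assuming the service runs locally; the host, port, usecase id and feature names are placeholders, since they depend on the -ip/-p arguments and on the usecaseid passed to create_publish_service.

# Client sketch only (not part of the generated service); all values below are assumptions.
import requests

BASE = "http://127.0.0.1:8094/AION/AI0110"   # assumed ip, port and usecase id

# POST /predict takes a JSON object of feature:value pairs (hypothetical features here).
print(requests.post(f"{BASE}/predict", json={"feature1": 10.5, "feature2": "A"}).json())

# GET /metrices returns the deployed version, prediction-call count and drift history.
print(requests.get(f"{BASE}/metrices").json())

# POST /monitoring runs input-drift analysis against the accumulated production data.
print(requests.post(f"{BASE}/monitoring", json={}).json())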
utility.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
TAB_CHAR = ' ' * 4

def import_modules(importer, modules_list):
    for module in modules_list:
        mod_from = module.get('mod_from', None)
        mod_as = module.get('mod_as', None)
        importer.addModule(module['module'], mod_from=mod_from, mod_as=mod_as)
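import_modules simply forwards a list of module descriptors to an importModule collector (defined in imports.py below). A minimal sketch of the expected descriptor shape, assuming both modules are importable from the prediction_package directory:

# Sketch only; the bare import paths are assumptions for illustration.
from imports import importModule
from utility import import_modules

importer = importModule()
modules_list = [
    {'module': 'json'},                          # plain "import json"
    {'module': 'pandas', 'mod_as': 'pd'},        # "import pandas as pd"
    {'module': 'Path', 'mod_from': 'pathlib'},   # "from pathlib import Path"
]
import_modules(importer, modules_list)
print(importer.getCode())   # emits the collected import statements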
imports.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from importlib.metadata import version import sys class importModule(): def __init__(self): self.importModule = {} self.stdlibModule = [] self.localModule = {} def addLocalModule(self,module, mod_from=None, mod_as=None): if module == '*': if module not in self.localModule.keys(): self.localModule[module]= [mod_from] else: self.localModule[module].append(mod_from) elif module not in self.localModule.keys(): self.localModule[module] = {'from':mod_from, 'as':mod_as} def addModule(self, module, mod_from=None, mod_as=None): if module not in self.importModule.keys(): self.importModule[module] = {'from':mod_from, 'as':mod_as} if module in sys.stdlib_module_names: self.stdlibModule.append(module) elif isinstance(self.importModule[module], list): if mod_as not in [x['as'] for x in self.importModule[module]]: self.importModule[module].append({'from':mod_from, 'as':mod_as}) elif mod_as not in [x['from'] for x in self.importModule[module]]: self.importModule[module].append({'from':mod_from, 'as':mod_as}) elif mod_as != self.importModule[module]['as']: as_list = [self.importModule[module]] as_list.append({'from':mod_from, 'as':mod_as}) self.importModule[module] = as_list elif mod_from != self.importModule[module]['from']: as_list = [self.importModule[module]] as_list.append({'from':mod_from, 'as':mod_as}) self.importModule[module] = as_list def getModules(self): return (self.importModule, self.stdlibModule) def getBaseModule(self, extra_importers=[]): modules_alias = { 'sklearn':'scikit-learn', 'genetic_selection':'sklearn-genetic', 'google': 'google-cloud-storage', 'azure':'azure-storage-file-datalake'} local_modules = {'AIX':'/app/AIX-0.1-py3-none-any.whl'} modules = [] require = "" if extra_importers: extra_importers = [importer.importModule for importer in extra_importers if isinstance(importer, importModule)] importers_module = [self.importModule] + extra_importers for importer_module in importers_module: for k,v in importer_module.items(): if v['from']: mod = v['from'].split('.')[0] else: mod = k if mod in modules_alias.keys(): mod = modules_alias[mod] modules.append(mod) modules = list(set(modules)) for mod in modules: try: if mod in local_modules.keys(): require += f"{local_modules[mod]}\n" else: require += f"{mod}=={version(mod)}\n" except : if mod not in sys.stdlib_module_names: raise return require def getCode(self): def to_string(k, v): mod = '' if v['from']: mod += 'from {} '.format(v['from']) mod += 'import {}'.format(k) if v['as']: mod += ' as {} '.format(v['as']) return mod modules = "" local_modules = "" std_lib_modules = "" third_party_modules = "" for k,v in self.importModule.items(): if k in self.stdlibModule: std_lib_modules = std_lib_modules + '\n' + to_string(k, v) elif isinstance(v, dict): third_party_modules = third_party_modules + '\n' + to_string(k, v) elif isinstance(v, list): for alias in v: third_party_modules = third_party_modules + '\n' + to_string(k, alias) for k,v in self.localModule.items(): if k != '*': local_modules = local_modules + '\n' + 
to_string(k, v) else: for mod_from in v: local_modules = local_modules + '\n' + f'from {mod_from} import {k}' if std_lib_modules: modules = modules + "\n#Standard Library modules" + std_lib_modules if third_party_modules: modules = modules + "\n\n#Third Party modules" + third_party_modules if local_modules: modules = modules + "\n\n#local modules" + local_modules + '\n' return modules def copyCode(self, importer): self.importModule, self.stdlibModule = importer.getModules()
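Besides emitting grouped import statements with getCode(), importModule can pin requirements for the third-party packages it has seen via getBaseModule(), which looks up installed versions with importlib.metadata. A sketch, assuming Python 3.10+ (addModule relies on sys.stdlib_module_names) and that pandas is installed:

# Sketch only; the import path and installed packages are assumptions.
from imports import importModule

imp = importModule()
imp.addModule('json')                               # tracked as a stdlib module
imp.addModule('pandas', mod_as='pd')                # third-party: "import pandas as pd"
imp.addModule('json_normalize', mod_from='pandas')  # getBaseModule maps this back to the pandas package
imp.addLocalModule('aionPrediction', mod_from='aion_prediction')

print(imp.getCode())        # "#Standard Library modules", "#Third Party modules", "#local modules" sections
print(imp.getBaseModule())  # pinned "pkg==<installed version>" lines for the third-party packages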
EncryptPythonSourceCode.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import subprocess import os import glob import sys import python_minifier def encrypt_files(path): cwd = os.getcwd() secure_path = os.path.join(path,'SecuredScripts') try: if not os.path.exists(secure_path): os.mkdir(secure_path) files = [f for f in glob.glob(path + "/*.py")] for file in files: #encrypted_file_details[0] = file #file = files[0] #print(file) #filename_w_dir = os.path.splitext(file) filename_w_ext = os.path.basename(file) filename, file_extension = os.path.splitext(filename_w_ext) file_folder_path = os.path.join(secure_path,filename) #print(file_folder_path) if not os.path.exists(file_folder_path): os.mkdir(file_folder_path) # Minify python source code minify_file = os.path.join(file_folder_path,filename+'_minify.py') pythonfolder,_ = os.path.split(sys.executable) pyminify_script = os.path.join(pythonfolder,'Scripts','pyminify.exe') minify_command = "\""+sys.executable+"\" \""+pyminify_script+ "\" \"" + file + "\" > \"" + minify_file+"\"" subprocess.call(minify_command, shell=True) # Change directory to folder path os.chdir(file_folder_path) # Obfuscate minified file pyarmor_script = os.path.join(pythonfolder,'Scripts','pyarmor.exe') obfusc_commmand = "\""+sys.executable+"\" \""+pyarmor_script+"\" obfuscate \"" + minify_file+"\"" #print(obfusc_commmand) subprocess.call(obfusc_commmand, shell=True) # Change directory to dist path obfusc_file = os.path.join(file_folder_path,'dist',filename+'_minify.py') #print(obfusc_file) chdirpath = os.path.join(file_folder_path,'dist') os.chdir(chdirpath) # Compress obfuscated file compressed_file = os.path.join(file_folder_path,'dist',filename+'_compressed.py') #print(compressed_file) pyminifier_script = os.path.join(pythonfolder,'Scripts','pyminifier.exe') compress_command = "\""+sys.executable+"\" \""+pyminifier_script+"\" --gzip -o \"" +compressed_file + "\" \"" + obfusc_file+"\"" #print(compress_command) subprocess.call(compress_command, shell=True) #compile_command = sys.executable+'-m py_compile "' + compressed_file+'"' #print(compile_command) #subprocess.call(compile_command , shell=True) #encrypted_file_details['compiled_file'] = file #compiled_file = os.path.join(file_folder_path,'dist','__pycache__',filename+'_compressed.cpython-37.pyc') #encrypted_file_details[1] = compiled_file #encrypted_file_list.append(encrypted_file_details) #encrypted_file = filename + '_compressed.cpython-37_encrypted.pyc' #encrypt_command = "python " + cwd + "\\Encrypt_Key_Dcrypt.py " + compiled_file + ' ' + encrypted_file + " --g -e" #print(encrypt_command) #subprocess.call(encrypt_command, shell=True) #encrypted_file_list += ']' #return(encrypted_file_list) os.chdir(path) except OSError as err: print ("Creation of the directory %s failed "+str(err)) # Driver function if __name__=="__main__": path = sys.argv[1] encrypt_files(path) #(base) C:\Himanshu\DataPreprocessing>pyminify DataPreprocessing.py > DataPreprocessing_minify.py #Obfuscate #(base) C:\Himanshu\DataPreprocessing>pyarmor obfuscate 
C:\Himanshu\DataPreprocessing\DataPreprocessing_minify.py #Compression #(base) C:\Himanshu\DataPreprocessing>pyminifier --gzip -o C:\Himanshu\DataPreprocessing\dist\DataPreprocessing_compressed.py C:\Himanshu\DataPreprocessing\dist\DataPreprocessing_minify.py #(base) C:\Himanshu\DataPreprocessing>cd dist #(base) C:\Himanshu\DataPreprocessing\dist>python DataPreprocessing_compressed.py "DocumentText" "Label" 90 ".csv" "C:\Himanshu\DataAcquisition\ClassificationDataNewBalanced.csv" #Compiling compressed .py to .pyc file #(base) C:\Himanshu\DataPreprocessing\dist>python -m py_compile DataPreprocessing_compressed.py #Encrypt .pyc file #(base) C:\Himanshu\DataPreprocessing\dist>python C:\Himanshu\Encrypt_Key_Dcrypt.py C:\Himanshu\DataPreprocessing\dist\__pycache__\DataPreprocessing_compressed.cpython-36.pyc DataPreprocessing_compressed.cpython-36_encrypted.pyc --g -e #Decrypt file #(base) C:\Himanshu\DataPreprocessing\dist>python C:\Himanshu\Encrypt_Key_Dcrypt.py DataPreprocessing_compressed.cpython-36_encrypted.pyc DataPreprocessing_compressed.cpython-36_decrypted.pyc --d #Run decrypted file #(base) C:\Himanshu\DataPreprocessing\dist>python DataPreprocessing_compressed.cpython-36_decrypted.pyc "DocumentText" "Label" 90 ".csv" "C:\Himanshu\DataAcquisition\ClassificationDataNewBalanced.csv"
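encrypt_files() chains three subprocess steps per script (pyminify, pyarmor obfuscate, pyminifier --gzip), resolving the tools from the active interpreter's Scripts folder, so a Windows-style install with those packages is assumed. An illustrative invocation with a placeholder path:

# Placeholder path; roughly equivalent to "python EncryptPythonSourceCode.py <path>".
# Output lands in <path>/SecuredScripts/<name>/dist/<name>_compressed.py.
from EncryptPythonSourceCode import encrypt_files

encrypt_files(r"C:\path\to\deployment\scripts")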
create_docker.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import shutil import subprocess from os.path import expanduser import platform deploymentfolder = os.path.join(os.path.dirname(os.path.abspath(__file__)),'HCLT','AION','target') modelname='AION_12' version='1' def createDockerImage(deploymentfolder,modelname,version,learner_type,textdata): modelPath = os.path.join(deploymentfolder) filename = os.path.join(deploymentfolder,'docker_image') modelservice = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','run_modelService.py') shellscript = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','start_modelservice.sh') aix = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','AIX-0.1-py3-none-any.whl') drift = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','Drift-0.1-py3-none-any.whl') sitepackage = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','site-packages') model_dockerSetup = os.path.join(os.path.dirname(os.path.abspath(__file__)),'dockersetup','docker_'+modelname + '_' + version) docker_setup = os.path.join(model_dockerSetup,modelname + '_' + version) model_sitepackage = os.path.join(model_dockerSetup,'site-packages') model_dockerSetupservicefile = os.path.join(model_dockerSetup,'run_modelService.py') model_dockershellscript = os.path.join(model_dockerSetup,'start_modelservice.sh') model_aix = os.path.join(model_dockerSetup,'AIX-0.1-py3-none-any.whl') model_drift = os.path.join(model_dockerSetup,'Drift-0.1-py3-none-any.whl') try: os.mkdir(model_dockerSetup) except Exception as e: print("Error in creating Setup directpry "+str(e)) pass shutil.copytree(modelPath, docker_setup) if textdata: shutil.copytree(sitepackage, model_sitepackage) modelpretrainpath=os.path.join(model_dockerSetup,'HCLT','AION','PreTrainedModels','TextProcessing') ''' try: os.makedirs(modelpretrainpath, exist_ok=True) except Exception as e: print("Error in creating Setup directpry "+str(e)) pass ''' home = expanduser("~") if platform.system() == 'Windows': hostpretrainpath = os.path.join(home,'AppData','Local','HCLT','AION','PreTrainedModels','TextProcessing') else: hostpretrainpath = os.path.join(home,'HCLT','AION','PreTrainedModels','TextProcessing') shutil.copytree(hostpretrainpath, modelpretrainpath) shutil.copyfile(modelservice, model_dockerSetupservicefile) shutil.copyfile(shellscript, model_dockershellscript) shutil.copyfile(aix, model_aix) shutil.copyfile(drift,model_drift) try: os.mkdir(filename) except: pass requirementfilename = os.path.join(model_dockerSetup,'requirements.txt') installfilename = os.path.join(model_dockerSetup,'install.py') dockerfile = os.path.join(model_dockerSetup,'Dockerfile') dockerdata='FROM python:3.8-slim-buster' dockerdata+='\n' if textdata: dockerdata+='WORKDIR /root' dockerdata+='\n' dockerdata+='COPY HCLT HCLT' dockerdata+='\n' dockerdata+='WORKDIR /app' dockerdata+='\n' 
dockerdata+='COPY requirements.txt requirements.txt' dockerdata+='\n' dockerdata+='COPY '+modelname+'_'+version+' '+modelname+'_'+version dockerdata+='\n' if textdata: dockerdata+='COPY site-packages site-packages' dockerdata+='\n' dockerdata+='COPY install.py install.py' dockerdata+='\n' dockerdata+='COPY run_modelService.py run_modelService.py' dockerdata+='\n' dockerdata+='COPY AIX-0.1-py3-none-any.whl AIX-0.1-py3-none-any.whl' dockerdata+='\n' dockerdata+='COPY Drift-0.1-py3-none-any.whl Drift-0.1-py3-none-any.whl' dockerdata+='\n' dockerdata+='COPY start_modelservice.sh start_modelservice.sh' dockerdata+='\n' if textdata: dockerdata+='''RUN apt-get update \ && apt-get install -y build-essential manpages-dev \ && python -m pip install --no-cache-dir --upgrade pip \ && python -m pip install --no-cache-dir pandas==1.2.4 \ && python -m pip install --no-cache-dir numpy==1.19.5 \ && python -m pip install --no-cache-dir joblib==1.0.1 \ && python -m pip install --no-cache-dir Cython==0.29.23 \ && mv site-packages/* /usr/local/lib/python3.8/site-packages \ && python -m pip install --no-cache-dir scipy==1.6.3 \ && python -m pip install --no-cache-dir AIX-0.1-py3-none-any.whl \ && python -m pip install --no-cache-dir Drift-0.1-py3-none-any.whl \ && python -m pip install --no-cache-dir scikit-learn==0.24.2 \ && python -m pip install --no-cache-dir spacy==2.2.3 \ && python -m pip install --no-cache-dir nltk==3.6.2 \ && python -m pip install --no-cache-dir textblob==0.15.3 \ && python -m pip install --no-cache-dir gensim==3.8.3 \ && python -m pip install --no-cache-dir demoji==1.1.0 \ && python -m pip install --no-cache-dir lxml==4.6.3 \ && python -m pip install --no-cache-dir Beautifulsoup4==4.9.3 \ && python -m pip install --no-cache-dir Unidecode==1.2.0 \ && python -m pip install --no-cache-dir pyspellchecker==0.6.2 \ && python -m pip install --no-cache-dir pycontractions==2.0.1 \ && python -m pip install --no-cache-dir tensorflow==2.4.1 \ && python -m pip install --no-cache-dir nltk==3.6.2 \ && python -m pip install --no-cache-dir -r requirements.txt \ && python install.py \ && chmod +x start_modelservice.sh ENTRYPOINT ["./start_modelservice.sh"] ''' else: dockerdata+='''RUN apt-get update \ && apt-get install -y build-essential manpages-dev \ && python -m pip install --no-cache-dir --upgrade pip \ && python -m pip install --no-cache-dir pandas==1.2.4 \ && python -m pip install --no-cache-dir numpy==1.19.5 \ && python -m pip install --no-cache-dir joblib==1.0.1 \ && python -m pip install --no-cache-dir Cython==0.29.23 \ && python -m pip install --no-cache-dir scipy==1.6.3 \ && python -m pip install --no-cache-dir AIX-0.1-py3-none-any.whl \ && python -m pip install --no-cache-dir Drift-0.1-py3-none-any.whl \ && python -m pip install --no-cache-dir scikit-learn==0.24.2 \ && python -m pip install --no-cache-dir -r requirements.txt \ && chmod +x start_modelservice.sh ENTRYPOINT ["./start_modelservice.sh"] ''' f = open(dockerfile, "w") f.write(str(dockerdata)) f.close() requirementdata='' requirementdata+='word2number==1.1' if learner_type == 'DL': requirementdata+='\n' requirementdata+='tensorflow==2.5.0' f = open(requirementfilename, "w") f.write(str(requirementdata)) f.close() if textdata: installfile=''' import nltk import ssl try: _create_unverified_https_context = ssl._create_unverified_context except AttributeError: pass else: ssl._create_default_https_context = _create_unverified_https_context nltk.download('punkt') nltk.download('wordnet') nltk.download('stopwords') 
nltk.download('averaged_perceptron_tagger')''' f = open(installfilename, "w") f.write(str(installfile)) f.close() try: command = 'docker pull python:3.8-slim-buster' os.system(command); #subprocess.check_call(["chmod", "+x", "start_modelservice.sh"], cwd=model_dockerSetup) subprocess.check_call(["docker", "build", "-t",modelname.lower()+":"+version,"."], cwd=model_dockerSetup) subprocess.check_call(["docker", "save", "-o",modelname.lower()+"_"+version+".tar",modelname.lower()+":"+version], cwd=model_dockerSetup) dockerfilepath = os.path.join(model_dockerSetup,modelname.lower()+"_"+version+".tar") shutil.copyfile(dockerfilepath, os.path.join(filename,modelname.lower()+"_"+version+".tar")) shutil.rmtree(model_dockerSetup) return 'Success','SUCCESSFULLY' except Exception as e: print("Error: "+str(e)) shutil.rmtree(model_dockerSetup) return 'Error',str(e) #createDockerImage(deploymentfolder,modelname,version)
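createDockerImage() copies the deployed model folder (plus site-packages and the pretrained text models when textdata is set), generates a Dockerfile, requirements.txt and install.py, then builds and saves the image as a tar under <deploymentfolder>/docker_image. An illustrative call; the paths are placeholders and the docker CLI must be available:

# Sketch only; the import path and directory are assumptions.
from create_docker import createDockerImage

status, msg = createDockerImage(
    deploymentfolder=r"C:\path\to\AION\target\AION_12\1",   # placeholder deployment folder
    modelname="AION_12",
    version="1",
    learner_type="ML",   # anything other than 'DL' skips the extra tensorflow pin
    textdata=False,      # True also packages site-packages and the pretrained text models
)
# On success the image is tagged "aion_12:1" and saved as docker_image/aion_12_1.tar,
# which can be loaded elsewhere with "docker load -i aion_12_1.tar".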
__init__.py
null
requirements.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from importlib.metadata import version
import sys
import os

def requirementfile(deploy_path, model, textFeatures, learner_type):
    # Base packages required by every deployment, pinned to the installed versions.
    modules = ['pandas', 'numpy', 'alibi', 'matplotlib', 'joblib', 'shap', 'ipython',
               'category_encoders', 'scikit-learn', 'word2number', 'flask_restful',
               'evidently', 'Flask-Cors']
    requires = ''
    for mod in modules:
        requires += f"{mod}=={version(mod)}\n"
    # Text-processing stack is only needed when text features are used.
    if len(textFeatures) > 0:
        tmodules = ['spacy', 'nltk', 'textblob', 'demoji', 'beautifulsoup4',
                    'text-unidecode', 'pyspellchecker', 'contractions', 'protobuf']
        for mod in tmodules:
            requires += f"{mod}=={version(mod)}\n"
    # Algorithm-specific packages.
    if model == 'Extreme Gradient Boosting (XGBoost)':
        mmodules = ['xgboost']
        for mod in mmodules:
            requires += f"{mod}=={version(mod)}\n"
    if model == 'Light Gradient Boosting (LightGBM)':
        mmodules = ['lightgbm']
        for mod in mmodules:
            requires += f"{mod}=={version(mod)}\n"
    if model == 'Categorical Boosting (CatBoost)':
        mmodules = ['catboost']
        for mod in mmodules:
            requires += f"{mod}=={version(mod)}\n"
    if model.lower() == 'arima':
        mmodules = ['pmdarima']
        for mod in mmodules:
            requires += f"{mod}=={version(mod)}\n"
    if model.lower() == 'fbprophet':
        mmodules = ['prophet']
        for mod in mmodules:
            requires += f"{mod}=={version(mod)}\n"
    if model.lower() == 'lstm' or model.lower() == 'mlp' or learner_type == 'DL':
        mmodules = ['tensorflow']
        for mod in mmodules:
            requires += f"{mod}=={version(mod)}\n"
    if model.lower() in ['cox', 'kaplanmeierfitter']: #bug 12833
        mmodules = ['lifelines']
        for mod in mmodules:
            requires += f"{mod}=={version(mod)}\n"
    if model.lower() == 'sentencetransformer': #bug 12833
        mmodules = ['sentence_transformers']
        for mod in mmodules:
            requires += f"{mod}=={version(mod)}\n"
    filename = os.path.join(deploy_path, 'requirements.txt')
    f = open(filename, "wb")
    f.write(str(requires).encode('utf8'))
    f.close()
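requirementfile() pins every dependency to whatever version is installed in the current environment (via importlib.metadata.version) and appends text-processing and algorithm-specific packages on top. An illustrative call; the path and import location are assumptions, and every listed package must actually be installed or version() raises PackageNotFoundError:

# Sketch only.
from requirements import requirementfile

requirementfile(
    deploy_path=r"C:\path\to\deploy",                 # placeholder
    model="Extreme Gradient Boosting (XGBoost)",      # adds an xgboost pin
    textFeatures=[],                                  # a non-empty list adds the NLP stack
    learner_type="ML",
)
# Writes <deploy_path>/requirements.txt with one "pkg==<installed version>" line per package.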
eion_compress.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import platform import sys import subprocess import glob import shutil import time from aion_deployment.EncryptPythonSourceCode import encrypt_files import json def encrypt(alldirs): for dir in alldirs: try: encrypt_files(dir) except Exception as error_obj: print("Exception in encrypting", error_obj) print("-"*50) def replace_by_compressed(alldirs): for dir in alldirs: try: #print("Processing dir", dir) files = [f for f in glob.glob(dir + "/*.py")] secure_path = os.path.join(dir, 'SecuredScripts') time.sleep(6) for file in files: try: filename_w_ext = os.path.basename(file) filename, file_extension = os.path.splitext(filename_w_ext) if filename == "__init__": continue #print("Processing file", file) file_folder_path = os.path.join(secure_path, filename, 'dist') compressed_file_path = os.path.join(file_folder_path, filename+'_compressed.py') shutil.copy(compressed_file_path, dir) os.remove(file) new_compressed_file_path = os.path.join(dir, filename+'_compressed.py') target_file_path = os.path.join(dir, filename_w_ext) os.rename(new_compressed_file_path, target_file_path) if filename == 'aion_prediction': shutil.copytree(os.path.join(file_folder_path, 'pytransform'), os.path.join(dir, 'pytransform')) except Exception as error_obj: print("Exception in file ", error_obj) shutil.rmtree(secure_path) except Exception as error_obj: print("Exception in dir ", error_obj) def start_Obfuscate(path): project_path = path subdirs = [dI for dI in os.listdir(project_path) if os.path.isdir(os.path.join(project_path,dI))] alldirs = [ project_path, ] for subdir in subdirs: if(subdir != 'pytransform'): alldirs.append(os.path.join(project_path, subdir)) encrypt(alldirs) replace_by_compressed(alldirs) if __name__=="__main__": project_path = sys.argv[1] print("project_path", project_path) subdirs = [dI for dI in os.listdir(project_path) if os.path.isdir(os.path.join(project_path,dI))] alldirs = [ project_path, ] for subdir in subdirs: alldirs.append(os.path.join(project_path, subdir)) encrypt(alldirs) print("*"*50) replace_by_compressed(alldirs) # python eion_compress.py "C:\Users\ashwani.s\Desktop\22April\22April\Mohita" "C:\Users\ashwani.s\Desktop\eion\eion" > logfile.log
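start_Obfuscate() ties the two steps together: it encrypts the project directory and each first-level subdirectory (skipping pytransform), then swaps every plain .py for its compressed counterpart. Illustrative usage with a placeholder path:

# Placeholder path; roughly equivalent to invoking the module from the command line
# as shown in the file's trailing comment.
from eion_compress import start_Obfuscate

start_Obfuscate(r"C:\path\to\AION\target\AION_12\1")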
production.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' from pathlib import Path from AION.prediction_package.imports import importModule from AION.prediction_package.aion_prediction import aionPrediction from AION.prediction_package.utility import TAB_CHAR from AION.prediction_package import utility from AION.prediction_package import common from AION.prediction_package.base import deployer def is_supported(problem_type, algo=None): """ Return True if problem_type supported otherwise False """ supported = ['classification','regression','clustering','timeseriesforecasting','Text Similarity'] return problem_type in supported def get_deployer(problem_type, algo=None, params={}): """ Return deployer class object based on problem type Raise error if no class is associated with problem type """ params['problem_type'] = problem_type if problem_type == 'classification': return classification( params) elif problem_type == 'regression': return regression( params) elif problem_type == 'clustering': return clustering( params) elif problem_type == 'timeseriesforecasting': from AION.prediction_package.time_series import forecasting return forecasting.get_deployer( params) elif problem_type == 'Text Similarity': return textSimilarity( params) else: raise ValueError('deployment is not supported') class classification( deployer): def __init__(self, params={}): super().__init__( params) self.feature_reducer = False if not self.name: self.name = 'classification' def create_idrift(self): obj = aionPrediction() if self.params['features']['text_feat']: obj.create_text_drift_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat'],self.name) else: obj.create_drift_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat'],self.name) def create_odrift(self): obj = aionPrediction() if self.params['features']['text_feat']: obj.create_classification_text_performance_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat']) else: obj.create_classification_performance_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat']) def training_code( self): self.importer.addModule(module='pandas',mod_as='pd') code = f""" class trainer(): """ init_code, run_code = self._get_train_code() return code + init_code + run_code def _get_train_code(self): init_code = f""" def __init__( self): model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}" if not model_file.exists(): raise ValueError(f'Trained model file not found: {{model_file}}')""" run_code = f""" def run(self, df):\ """ if self.params['training']['algo'] in ['Neural Network']: self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') init_code += f""" self.model = load_model(model_file) """ run_code += """ df = df.astype(np.float32) return pd.DataFrame(np.argmax(self.model.predict(df),axis=1)) """ elif self.params['training']['algo'] in ['Neural Architecture Search']: 
self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') self.importer.addModule(module='autokeras',mod_as='ak') init_code += f""" self.model = load_model(model_file,custom_objects=ak.CUSTOM_OBJECTS) """ run_code += """ df = df.astype(np.float32) return pd.DataFrame(self.model.predict(df)) """ elif self.params['training']['algo'] in ['Deep Q Network','Dueling Deep Q Network']: self.importer.addModule('joblib') self.importer.addModule(module='numpy',mod_as='np') self.importer.addModule(module='constant',mod_from='tensorflow') self.importer.addModule(module='time_step',mod_from='tf_agents.trajectories') init_code += f""" self.model = joblib.load(model_file) """ run_code += """ df = df.astype(np.float32) q, _ = self.model(np.array(df), step_type=constant([time_step.StepType.FIRST] * np.array(df).shape[0]), training=False) return pd.DataFrame(q.numpy()) """ elif self.params['training']['algo'] in ['Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)']: self.importer.addModule(module='numpy',mod_as='np') self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') init_code += f""" self.model = load_model(model_file) """ run_code += """ df = np.expand_dims(df, axis=2) df = df.astype(np.float32) return pd.DataFrame(np.argmax(self.model.predict(df),axis=1)) """ else: self.importer.addModule(module='joblib') self.importer.addModule(module='numpy',mod_as='np') init_code += f""" self.model = joblib.load(model_file) """ run_code += """ df = df.astype(np.float32) return pd.DataFrame(self.model.predict_proba(df), columns=self.model.classes_) """ return init_code, run_code def formatter_code(self): self.importer.addModule('json') self.importer.addModule('joblib') self.importer.addModule('pandas', mod_as='pd') return """ class output_format(): def __init__(self): pass def run(self, raw_df, output): output = round(output,2) encoder_file = (Path(__file__).parent/"model")/"label_encoder.pkl" if encoder_file.exists(): encoder = joblib.load(encoder_file) output.rename(columns=dict(zip(output.columns, encoder.inverse_transform(list(output.columns)))), inplace=True) raw_df['prediction'] = output.idxmax(axis=1) raw_df['probability'] = output.max(axis=1).round(2) raw_df['remarks'] = output.apply(lambda x: x.to_json(double_precision=2), axis=1) outputjson = raw_df.to_json(orient='records',double_precision=5) outputjson = {"status":"SUCCESS","data":json.loads(outputjson)} return(json.dumps(outputjson)) """ class regression( deployer): def __init__(self, params={}): super().__init__( params) self.feature_reducer = False if not self.name: self.name = 'regression' def create_idrift(self): obj = aionPrediction() if self.params['features']['text_feat']: obj.create_text_drift_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat'],self.name) else: obj.create_drift_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat'],self.name) def create_odrift(self): obj = aionPrediction() if self.params['features']['text_feat']: obj.create_regression_text_performance_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat']) else: obj.create_regression_performance_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat']) def training_code( self): self.importer.addModule(module='pandas',mod_as='pd') code = f""" class trainer(): """ init_code = f""" def 
__init__( self): model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}" if not model_file.exists(): raise ValueError(f'Trained model file not found: {{model_file}}') """ run_code = f""" def run(self, df):\ """ if self.params['training']['algo'] in ['Neural Architecture Search']: self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') self.importer.addModule(module='autokeras',mod_as='ak') init_code += f""" self.model = load_model(model_file,custom_objects=ak.CUSTOM_OBJECTS) """ run_code += """ df = df.astype(np.float32) return self.model.predict(df).reshape(1, -1) """ elif self.params['training']['algo'] in ['Neural Network','Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)']: self.importer.addModule(module='numpy',mod_as='np') self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') init_code += f""" self.model = load_model(model_file) """ run_code += """ df = np.expand_dims(df, axis=2) df = df.astype(np.float32) return self.model.predict(df).reshape(1, -1) """ else: self.importer.addModule('joblib') init_code += f""" self.model = joblib.load(model_file) """ run_code += """ df = df.astype(np.float32) return self.model.predict(df).reshape(1, -1) """ return code + init_code + run_code def formatter_code(self): self.importer.addModule('json') self.importer.addModule('pandas', mod_as='pd') return """ class output_format(): def __init__(self): pass def run(self, raw_df, output): raw_df['prediction'] = output[0] raw_df['prediction'] = raw_df['prediction'].round(2) outputjson = raw_df.to_json(orient='records',double_precision=5) outputjson = {"status":"SUCCESS","data":json.loads(outputjson)} return(json.dumps(outputjson)) """ class clustering( deployer): def __init__(self, params={}): super().__init__( params) self.feature_reducer = False if not self.name: self.name = 'clustering' def training_code( self): self.importer.addModule('joblib') self.importer.addModule(module='pandas',mod_as='pd') code = f""" class trainer(): """ init_code = f""" def __init__( self): model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}" if not model_file.exists(): raise ValueError(f'Trained model file not found: {{model_file}}') """ run_code = f""" def run(self, df):\ """ if self.params['training']['algo'] == 'DBSCAN': init_code += f""" self.model = joblib.load(model_file) """ run_code += """ return self.model.fit_predict(df) """ else: init_code += f""" self.model = joblib.load(model_file) """ run_code += """ return self.model.predict(df).reshape(1, -1) """ return code + init_code + run_code def formatter_code(self): self.importer.addModule('json') self.importer.addModule('pandas', mod_as='pd') return """ class output_format(): def __init__(self): pass def run(self, raw_df, output): raw_df['prediction'] = output[0] raw_df['prediction'] = raw_df['prediction'].round(2) outputjson = raw_df.to_json(orient='records',double_precision=2) outputjson = {"status":"SUCCESS","data":json.loads(outputjson)} return(json.dumps(outputjson)) """ return code if __name__ == '__main__': config = {'usecase_name': 'AI0110', 'usecase_ver': '1', 'features': {'input_feat': ['v2'], 'target_feat': 'v1', 'text_feat': ['v2']}, 'paths': {'deploy': r'C:/Users/vashistah/AppData/Local/Programs/HCLTech/AION/data/target/AI0110/1', 'usecase': r'C:/Users/vashistah/AppData/Local/Programs/HCLTech/AION/data/target/AI0110'}, 'profiler': {'input_features': ['v2'], 
'output_features': ['07xxxxxxxxx_vect', '08700621170150p_vect', '08702840625comuk_vect', '08718726270150gbpmtmsg18_vect', '1000s_vect', '10am7pm_vect', '10k_vect', '10p_vect', '10pmin_vect', '10ppm_vect', '11mths_vect', '125gift_vect', '12hrs_vect', '12mths_vect', '150p_vect', '150perwksub_vect', '150pm_vect', '150pmin_vect', '150pmsg_vect', '150pmsgrcvdhgsuite3422landsroww1j6hl_vect', '150pmtmsgrcvd18_vect', '150ppm_vect', '150ptone_vect', '150pwk_vect', '150week_vect', '16only_vect', '18only_vect', '1hr_vect', '1minmobsmorelkpobox177hp51fl_vect', '1st_vect', '1x150pwk_vect', '20p_vect', '20pmin_vect', '21st_vect', '220cm2_vect', '24hrs_vect', '25p_vect', '26th_vect', '2day_vect', '2find_vect', '2geva_vect', '2go_vect', '2marrow_vect', '2mrw_vect', '2nd_vect', '2nite_vect', '2optout_vect', '2p_vect', '2u_vect', '2waxsto_vect', '2wks_vect', '300p_vect', '31pmsg_vect', '3510i_vect', '3d_vect', '3g_vect', '3gbp_vect', '3hrs_vect', '3mins_vect', '3qxj9_vect', '3rd_vect', '3ss_vect', '3u_vect', '3uz_vect', '3wk_vect', '40gb_vect', '4a_vect', '4d_vect', '4eva_vect', '4get_vect', '4info_vect', '4mths_vect', '4th_vect', '4u_vect', '50p_vect', '5min_vect', '5pm_vect', '5wb_vect', '5we_vect', '60pmin_vect', '6hrs_vect', '6months_vect', '6pm_vect', '7250i_vect', '7ish_vect', '8am_vect', '8pm_vect', '8th_vect', '8wp_vect', '9ae_vect', '9ja_vect', '9pm_vect', '9t_vect', 'aathi_vect', 'abi_vect', 'ability_vect', 'abiola_vect', 'able_vect', 'abt_vect', 'abta_vect', 'aburo_vect', 'ac_vect', 'academic_vect', 'acc_vect', 'accept_vect', 'access_vect', 'accident_vect', 'accidentally_vect', 'accordingly_vect', 'account_vect', 'ache_vect', 'across_vect', 'acted_vect', 'action_vect', 'activate_vect', 'activities_vect', 'actor_vect', 'actual_vect', 'actually_vect', 'ad_vect', 'adam_vect', 'add_vect', 'added_vect', 'addicted_vect', 'addie_vect', 'address_vect', 'admin_vect', 'administrator_vect', 'admirer_vect', 'admit_vect', 'adore_vect', 'adoring_vect', 'ads_vect', 'adult_vect', 'advance_vect', 'adventure_vect', 'advice_vect', 'advise_vect', 'affair_vect', 'affairs_vect', 'affectionate_vect', 'afraid_vect', 'aft_vect', 'afternoon_vect', 'aftr_vect', 'agalla_vect', 'age_vect', 'age16_vect', 'ages_vect', 'ago_vect', 'agree_vect', 'ah_vect', 'aha_vect', 'ahead_vect', 'ahmad_vect', 'ai_vect', 'aight_vect', 'aint_vect', 'air_vect', 'airport_vect', 'airtel_vect', 'aiya_vect', 'aiyah_vect', 'aiyar_vect', 'aiyo_vect', 'al_vect', 'album_vect', 'alert_vect', 'alex_vect', 'alfie_vect', 'ali_vect', 'allah_vect', 'allow_vect', 'allowed_vect', 'almost_vect', 'alone_vect', 'along_vect', 'already_vect', 'alright_vect', 'alrite_vect', 'also_vect', 'always_vect', 'alwys_vect', 'amazing_vect', 'american_vect', 'among_vect', 'amount_vect', 'amp_vect', 'amt_vect', 'andros_vect', 'angry_vect', 'annie_vect', 'anniversary_vect', 'announcement_vect', 'anot_vect', 'another_vect', 'ans_vect', 'ansr_vect', 'answer_vect', 'answered_vect', 'answering_vect', 'answers_vect', 'anthony_vect', 'anti_vect', 'anybody_vect', 'anymore_vect', 'anyone_vect', 'anything_vect', 'anytime_vect', 'anyway_vect', 'anyways_vect', 'apartment_vect', 'app_vect', 'apparently_vect', 'applebees_vect', 'apply_vect', 'appointment_vect', 'appreciate_vect', 'appreciated_vect', 'approx_vect', 'apps_vect', 'appt_vect', 'april_vect', 'ar_vect', 'arcade_vect', 'ard_vect', 'area_vect', 'argh_vect', 'argument_vect', 'arm_vect', 'armand_vect', 'arms_vect', 'around_vect', 'arrange_vect', 'arrested_vect', 'arrive_vect', 'arsenal_vect', 'art_vect', 'arun_vect', 'asap_vect', 
'ashley_vect', 'ask_vect', 'askd_vect', 'asked_vect', 'askin_vect', 'asking_vect', 'asks_vect', 'asleep_vect', 'ass_vect', 'assume_vect', 'ate_vect', 'atlanta_vect', 'atlast_vect', 'atm_vect', 'attached_vect', 'attempt_vect', 'attend_vect', 'auction_vect', 'august_vect', 'aunt_vect', 'aunty_vect', 'auto_vect', 'av_vect', 'available_vect', 'avatar_vect', 'ave_vect', 'avent_vect', 'avoid_vect', 'await_vect', 'awaiting_vect', 'awake_vect', 'award_vect', 'awarded_vect', 'away_vect', 'awesome_vect', 'aww_vect', 'b4_vect', 'ba_vect', 'babe_vect', 'babes_vect', 'babies_vect', 'baby_vect', 'back_vect', 'bad_vect', 'bag_vect', 'bags_vect', 'bahamas_vect', 'bak_vect', 'balance_vect', 'bank_vect', 'banks_vect', 'bar_vect', 'barely_vect', 'basic_vect', 'basically_vect', 'bat_vect', 'bath_vect', 'bathe_vect', 'bathing_vect', 'battery_vect', 'bay_vect', 'bb_vect', 'bc_vect', 'bck_vect', 'bcoz_vect', 'bday_vect', 'be_vect', 'bears_vect', 'beautiful_vect', 'beauty_vect', 'bec_vect', 'become_vect', 'becoz_vect', 'bed_vect', 'bedrm_vect', 'bedroom_vect', 'beer_vect', 'befor_vect', 'beg_vect', 'begin_vect', 'behave_vect', 'behind_vect', 'bein_vect', 'believe_vect', 'bell_vect', 'belly_vect', 'belovd_vect', 'best_vect', 'bet_vect', 'better_vect', 'beyond_vect', 'bf_vect', 'bid_vect', 'bids_vect', 'big_vect', 'bigger_vect', 'biggest_vect', 'bill_vect', 'billed_vect', 'billion_vect', 'bills_vect', 'bin_vect', 'biola_vect', 'birds_vect', 'birla_vect', 'birth_vect', 'birthdate_vect', 'birthday_vect', 'bishan_vect', 'bit_vect', 'bitch_vect', 'bite_vect', 'black_vect', 'blackberry_vect', 'blah_vect', 'blake_vect', 'blank_vect', 'bleh_vect', 'bless_vect', 'blessing_vect', 'bloo_vect', 'blood_vect', 'bloody_vect', 'blue_vect', 'bluetooth_vect', 'bluff_vect', 'boat_vect', 'body_vect', 'bold_vect', 'bone_vect', 'bonus_vect', 'boo_vect', 'book_vect', 'booked_vect', 'booking_vect', 'books_vect', 'boost_vect', 'booty_vect', 'bored_vect', 'boring_vect', 'born_vect', 'boss_vect', 'boston_vect', 'bother_vect', 'bottom_vect', 'bought_vect', 'bout_vect', 'bowl_vect', 'box_vect', 'box326_vect', 'box334sk38ch_vect', 'box97n7qp_vect', 'boy_vect', 'boye_vect', 'boyfriend_vect', 'boys_vect', 'boytoy_vect', 'brah_vect', 'brand_vect', 'bread_vect', 'break_vect', 'breathe_vect', 'bright_vect', 'brilliant_vect', 'bring_vect', 'bringing_vect', 'brings_vect', 'british_vect', 'bro_vect', 'broad_vect', 'broke_vect', 'broken_vect', 'bros_vect', 'brothas_vect', 'brother_vect', 'brought_vect', 'bruv_vect', 'bslvyl_vect', 'bt_vect', 'btnationalrate_vect', 'btw_vect', 'bucks_vect', 'bud_vect', 'budget_vect', 'buff_vect', 'buffet_vect', 'bugis_vect', 'building_vect', 'buns_vect', 'burger_vect', 'burns_vect', 'bus_vect', 'buses_vect', 'business_vect', 'busy_vect', 'butt_vect', 'buy_vect', 'buying_vect', 'buzz_vect', 'bx420_vect', 'bx420ip45we_vect', 'bye_vect', 'ca_vect', 'cabin_vect', 'cafe_vect', 'cake_vect', 'cal_vect', 'calculation_vect', 'calicut_vect', 'california_vect', 'call_vect', 'call2optout674_vect', 'callback_vect', 'callcost_vect', 'called_vect', 'caller_vect', 'callers_vect', 'callertune_vect', 'callin_vect', 'calling_vect', 'calls_vect', 'callså_vect', 'cam_vect', 'camcorder_vect', 'came_vect', 'camera_vect', 'cameravideo_vect', 'campus_vect', 'can_vect', 'canada_vect', 'canal_vect', 'canary_vect', 'cancel_vect', 'cancelled_vect', 'cancer_vect', 'cant_vect', 'captain_vect', 'car_vect', 'card_vect', 'cardiff_vect', 'care_vect', 'cared_vect', 'career_vect', 'careful_vect', 'carefully_vect', 'caring_vect', 'carlos_vect', 
'caroline_vect', 'cars_vect', 'cartoon_vect', 'case_vect', 'cash_vect', 'cashbalance_vect', 'cashin_vect', 'castor_vect', 'cat_vect', 'catch_vect', 'catching_vect', 'caught_vect', 'cause_vect', 'cbe_vect', 'cc_vect', 'cd_vect', 'cdgt_vect', 'cds_vect', 'celebrate_vect', 'celebration_vect', 'cell_vect', 'center_vect', 'centre_vect', 'certainly_vect', 'cha_vect', 'chain_vect', 'challenge_vect', 'chance_vect', 'change_vect', 'changed_vect', 'changes_vect', 'channel_vect', 'character_vect', 'charge_vect', 'charged_vect', 'charges_vect', 'charity_vect', 'charles_vect', 'chase_vect', 'chasing_vect', 'chat_vect', 'chatting_vect', 'cheap_vect', 'cheaper_vect', 'cheat_vect', 'chechi_vect', 'check_vect', 'checked_vect', 'checking_vect', 'cheers_vect', 'chennai_vect', 'cherish_vect', 'chest_vect', 'chicken_vect', 'chikku_vect', 'child_vect', 'childish_vect', 'children_vect', 'chill_vect', 'chillin_vect', 'china_vect', 'chinese_vect', 'chip_vect', 'chocolate_vect', 'choice_vect', 'choose_vect', 'chosen_vect', 'christ_vect', 'christmas_vect', 'church_vect', 'cine_vect', 'cinema_vect', 'citizen_vect', 'city_vect', 'claim_vect', 'claims_vect', 'claire_vect', 'class_vect', 'classes_vect', 'clean_vect', 'cleaning_vect', 'clear_vect', 'clearly_vect', 'click_vect', 'clock_vect', 'close_vect', 'closed_vect', 'closer_vect', 'closes_vect', 'clothes_vect', 'club_vect', 'cn_vect', 'co_vect', 'coast_vect', 'coat_vect', 'cochin_vect', 'code_vect', 'coffee_vect', 'coin_vect', 'coins_vect', 'cold_vect', 'colleagues_vect', 'collect_vect', 'collected_vect', 'collecting_vect', 'collection_vect', 'college_vect', 'colour_vect', 'come_vect', 'comedy_vect', 'comes_vect', 'comin_vect', 'coming_vect', 'commercial_vect', 'common_vect', 'community_vect', 'comp_vect', 'company_vect', 'competition_vect', 'complete_vect', 'completed_vect', 'completely_vect', 'complimentary_vect', 'computer_vect', 'concentrate_vect', 'concert_vect', 'conditions_vect', 'conducts_vect', 'confidence_vect', 'confirm_vect', 'congrats_vect', 'congratulations_vect', 'connection_vect', 'consider_vect', 'considering_vect', 'constant_vect', 'constantly_vect', 'contact_vect', 'contacted_vect', 'contacts_vect', 'content_vect', 'contents_vect', 'continue_vect', 'contract_vect', 'control_vect', 'convey_vect', 'convinced_vect', 'cool_vect', 'coping_vect', 'copy_vect', 'cornwall_vect', 'correct_vect', 'cos_vect', 'cost_vect', 'costa_vect', 'costs_vect', 'costå_vect', 'could_vect', 'count_vect', 'countin_vect', 'country_vect', 'couple_vect', 'course_vect', 'cover_vect', 'coz_vect', 'cr9_vect', 'cramps_vect', 'crave_vect', 'crazy_vect', 'created_vect', 'credit_vect', 'credited_vect', 'credits_vect', 'creepy_vect', 'crisis_vect', 'crore_vect', 'cross_vect', 'croydon_vect', 'cruise_vect', 'cry_vect', 'cs_vect', 'csbcm4235wc1n3xx_vect', 'csstop_vect', 'cud_vect', 'cuddling_vect', 'cum_vect', 'cup_vect', 'curious_vect', 'current_vect', 'currently_vect', 'cust_vect', 'custcare_vect', 'custcare08718720201_vect', 'custom_vect', 'customer_vect', 'customers_vect', 'cut_vect', 'cute_vect', 'cutting_vect', 'cuz_vect', 'cw25wx_vect', 'da_vect', 'dad_vect', 'daddy_vect', 'daily_vect', 'damn_vect', 'dance_vect', 'dancing_vect', 'dare_vect', 'dark_vect', 'darlin_vect', 'darling_vect', 'darren_vect', 'dat_vect', 'date_vect', 'dates_vect', 'dating_vect', 'dave_vect', 'day_vect', 'days_vect', 'de_vect', 'dead_vect', 'deal_vect', 'dealer_vect', 'dealing_vect', 'dear_vect', 'dearly_vect', 'death_vect', 'decide_vect', 'decided_vect', 'decimal_vect', 'decision_vect', 'deep_vect', 
'def_vect', 'definite_vect', 'definitely_vect', 'del_vect', 'delete_vect', 'deleted_vect', 'delhi_vect', 'deliver_vect', 'delivered_vect', 'deliveredtomorrow_vect', 'delivery_vect', 'dem_vect', 'demand_vect', 'den_vect', 'denis_vect', 'department_vect', 'depends_vect', 'depressed_vect', 'derek_vect', 'desires_vect', 'desperate_vect', 'details_vect', 'dey_vect', 'dhoni_vect', 'dial_vect', 'dick_vect', 'dictionary_vect', 'didn_vect', 'didnt_vect', 'didt_vect', 'die_vect', 'died_vect', 'diet_vect', 'different_vect', 'difficult_vect', 'digital_vect', 'dignity_vect', 'din_vect', 'dinner_vect', 'dint_vect', 'direct_vect', 'directly_vect', 'dirty_vect', 'dis_vect', 'discount_vect', 'discuss_vect', 'dislikes_vect', 'display_vect', 'distance_vect', 'distract_vect', 'disturb_vect', 'division_vect', 'dload_vect', 'dnt_vect', 'doc_vect', 'docs_vect', 'doctor_vect', 'doesnt_vect', 'dog_vect', 'dogging_vect', 'doggy_vect', 'doin_vect', 'dollars_vect', 'don_vect', 'done_vect', 'dont_vect', 'donåõt_vect', 'door_vect', 'dorm_vect', 'double_vect', 'dough_vect', 'download_vect', 'downloads_vect', 'draw_vect', 'dream_vect', 'dreams_vect', 'dress_vect', 'dressed_vect', 'dresser_vect', 'drink_vect', 'drinking_vect', 'drinks_vect', 'drive_vect', 'driver_vect', 'drivin_vect', 'driving_vect', 'drop_vect', 'dropped_vect', 'drug_vect', 'drugs_vect', 'drunk_vect', 'dry_vect', 'ds_vect', 'dubsack_vect', 'dude_vect', 'due_vect', 'dun_vect', 'dunno_vect', 'durban_vect', 'dvd_vect', 'earlier_vect', 'early_vect', 'earth_vect', 'easier_vect', 'easily_vect', 'east_vect', 'easter_vect', 'easy_vect', 'eat_vect', 'eaten_vect', 'eatin_vect', 'eating_vect', 'ebay_vect', 'ec2a_vect', 'ee_vect', 'eek_vect', 'eerie_vect', 'effects_vect', 'eg_vect', 'egg_vect', 'eggs_vect', 'eh_vect', 'eight_vect', 'either_vect', 'ela_vect', 'electricity_vect', 'else_vect', 'elsewhere_vect', 'em_vect', 'email_vect', 'embarassed_vect', 'empty_vect', 'end_vect', 'ended_vect', 'ending_vect', 'ends_vect', 'enemy_vect', 'energy_vect', 'eng_vect', 'engin_vect', 'england_vect', 'english_vect', 'enjoy_vect', 'enjoyed_vect', 'enough_vect', 'enter_vect', 'entered_vect', 'entitled_vect', 'entry_vect', 'enuff_vect', 'envelope_vect', 'er_vect', 'erm_vect', 'escape_vect', 'especially_vect', 'esplanade_vect', 'eta_vect', 'etc_vect', 'euro_vect', 'euro2004_vect', 'europe_vect', 'eve_vect', 'eveb_vect', 'even_vect', 'evening_vect', 'event_vect', 'ever_vect', 'every_vect', 'everybody_vect', 'everyday_vect', 'everyone_vect', 'everything_vect', 'everywhere_vect', 'evn_vect', 'evng_vect', 'ex_vect', 'exact_vect', 'exactly_vect', 'exam_vect', 'exams_vect', 'excellent_vect', 'except_vect', 'exciting_vect', 'excuse_vect', 'excuses_vect', 'executive_vect', 'exeter_vect', 'exhausted_vect', 'expect_vect', 'expecting_vect', 'expensive_vect', 'experience_vect', 'expired_vect', 'expires_vect', 'explain_vect', 'explicit_vect', 'explosive_vect', 'express_vect', 'extra_vect', 'eye_vect', 'eyes_vect', 'fa_vect', 'fab_vect', 'face_vect', 'facebook_vect', 'fact_vect', 'faggy_vect', 'failed_vect', 'fair_vect', 'faith_vect', 'fall_vect', 'falls_vect', 'family_vect', 'fan_vect', 'fancies_vect', 'fancy_vect', 'fantasies_vect', 'fantastic_vect', 'fantasy_vect', 'far_vect', 'farm_vect', 'fast_vect', 'faster_vect', 'fat_vect', 'father_vect', 'fathima_vect', 'fault_vect', 'fave_vect', 'favorite_vect', 'favour_vect', 'favourite_vect', 'fb_vect', 'feb_vect', 'february_vect', 'feel_vect', 'feelin_vect', 'feeling_vect', 'feels_vect', 'fees_vect', 'feet_vect', 'fell_vect', 'felt_vect', 
'fetch_vect', 'fever_vect', 'field_vect', 'fifteen_vect', 'fight_vect', 'fighting_vect', 'figure_vect', 'file_vect', 'files_vect', 'fill_vect', 'filling_vect', 'fills_vect', 'film_vect', 'final_vect', 'finally_vect', 'find_vect', 'fine_vect', 'fingers_vect', 'finish_vect', 'finished_vect', 'first_vect', 'fit_vect', 'fix_vect', 'fixed_vect', 'flag_vect', 'flaked_vect', 'flash_vect', 'flat_vect', 'flight_vect', 'flights_vect', 'flirt_vect', 'floor_vect', 'flower_vect', 'fml_vect', 'fo_vect', 'follow_vect', 'followed_vect', 'following_vect', 'fone_vect', 'food_vect', 'fool_vect', 'football_vect', 'force_vect', 'foreign_vect', 'forever_vect', 'forevr_vect', 'forget_vect', 'forgets_vect', 'forgiven_vect', 'forgot_vect', 'format_vect', 'forums_vect', 'forward_vect', 'forwarded_vect', 'found_vect', 'four_vect', 'fr_vect', 'fran_vect', 'freak_vect', 'free_vect', 'freefone_vect', 'freemsg_vect', 'freephone_vect', 'freezing_vect', 'fren_vect', 'frens_vect', 'fret_vect', 'fri_vect', 'friday_vect', 'friend_vect', 'friends_vect', 'friendship_vect', 'fringe_vect', 'frm_vect', 'frnd_vect', 'frnds_vect', 'frndship_vect', 'fuck_vect', 'fuckin_vect', 'fucking_vect', 'ful_vect', 'full_vect', 'fullonsmscom_vect', 'fun_vect', 'funny_vect', 'future_vect', 'fyi_vect', 'gal_vect', 'gals_vect', 'game_vect', 'games_vect', 'gang_vect', 'gap_vect', 'gaps_vect', 'garage_vect', 'garbage_vect', 'gary_vect', 'gas_vect', 'gautham_vect', 'gave_vect', 'gay_vect', 'gd_vect', 'ge_vect', 'gee_vect', 'geeee_vect', 'geeeee_vect', 'gender_vect', 'generally_vect', 'genius_vect', 'gentle_vect', 'gentleman_vect', 'gently_vect', 'germany_vect', 'get_vect', 'gets_vect', 'gettin_vect', 'getting_vect', 'gf_vect', 'gibbs_vect', 'gift_vect', 'gim_vect', 'girl_vect', 'girls_vect', 'gist_vect', 'giv_vect', 'give_vect', 'given_vect', 'gives_vect', 'giving_vect', 'glad_vect', 'gn_vect', 'go_vect', 'goal_vect', 'god_vect', 'goes_vect', 'goin_vect', 'going_vect', 'gold_vect', 'gon_vect', 'gona_vect', 'gone_vect', 'good_vect', 'goodmorning_vect', 'goodnight_vect', 'goodnite_vect', 'google_vect', 'gorgeous_vect', 'gossip_vect', 'got_vect', 'goto_vect', 'gotten_vect', 'govtinstituitions_vect', 'gr8_vect', 'grace_vect', 'gram_vect', 'grand_vect', 'granite_vect', 'gravity_vect', 'great_vect', 'green_vect', 'greet_vect', 'greetings_vect', 'grins_vect', 'grl_vect', 'ground_vect', 'group_vect', 'gt_vect', 'guaranteed_vect', 'gud_vect', 'gudnite_vect', 'guess_vect', 'guessing_vect', 'guide_vect', 'guilty_vect', 'guy_vect', 'guys_vect', 'gym_vect', 'ha_vect', 'haf_vect', 'haha_vect', 'hai_vect', 'hair_vect', 'haiz_vect', 'half_vect', 'halloween_vect', 'ham_vect', 'hand_vect', 'handed_vect', 'handle_vect', 'hands_vect', 'handset_vect', 'hanging_vect', 'happen_vect', 'happened_vect', 'happening_vect', 'happens_vect', 'happiness_vect', 'happy_vect', 'hard_vect', 'hardcore_vect', 'hardly_vect', 'harry_vect', 'hate_vect', 'hav_vect', 'havent_vect', 'havenåõt_vect', 'havin_vect', 'head_vect', 'headache_vect', 'headin_vect', 'heads_vect', 'hear_vect', 'heard_vect', 'heart_vect', 'heavy_vect', 'hee_vect', 'height_vect', 'held_vect', 'helen_vect', 'hell_vect', 'hella_vect', 'hello_vect', 'help_vect', 'help08712400602450p_vect', 'helpline_vect', 'hence_vect', 'henry_vect', 'heri_vect', 'herlove_vect', 'hes_vect', 'hex_vect', 'hey_vect', 'hgsuite3422lands_vect', 'hi_vect', 'hide_vect', 'high_vect', 'hill_vect', 'hint_vect', 'hip_vect', 'history_vect', 'hit_vect', 'hiya_vect', 'hl_vect', 'hlp_vect', 'hmm_vect', 'hmmm_vect', 'hmv_vect', 'ho_vect', 'hockey_vect', 
'hol_vect', 'hold_vect', 'holder_vect', 'holding_vect', 'holiday_vect', 'holla_vect', 'hols_vect', 'holy_vect', 'home_vect', 'honey_vect', 'hook_vect', 'hop_vect', 'hope_vect', 'hoped_vect', 'hopefully_vect', 'hoping_vect', 'horny_vect', 'horo_vect', 'horrible_vect', 'hospital_vect', 'hospitals_vect', 'hostel_vect', 'hot_vect', 'hotel_vect', 'hotels_vect', 'hour_vect', 'hours_vect', 'house_vect', 'housemaid_vect', 'however_vect', 'hows_vect', 'howz_vect', 'hr_vect', 'hrishi_vect', 'hrs_vect', 'http_vect', 'hubby_vect', 'hug_vect', 'huh_vect', 'hun_vect', 'hungry_vect', 'hunny_vect', 'hurry_vect', 'hurt_vect', 'hurting_vect', 'hurts_vect', 'husband_vect', 'hv_vect', 'hw_vect', 'hyde_vect', 'iam_vect', 'ibhltd_vect', 'ibiza_vect', 'ic_vect', 'ice_vect', 'id_vect', 'idea_vect', 'ideal_vect', 'ideas_vect', 'identifier_vect', 'idiot_vect', 'idk_vect', 'ignore_vect', 'ikea_vect', 'il_vect', 'ill_vect', 'im_vect', 'imagine_vect', 'imma_vect', 'immediately_vect', 'imp_vect', 'important_vect', 'impossible_vect', 'improved_vect', 'in2_vect', 'inc_vect', 'inches_vect', 'incident_vect', 'include_vect', 'including_vect', 'inclusive_vect', 'inconsiderate_vect', 'indeed_vect', 'india_vect', 'indian_vect', 'indians_vect', 'indicate_vect', 'infections_vect', 'infernal_vect', 'info_vect', 'inform_vect', 'information_vect', 'informed_vect', 'innings_vect', 'insha_vect', 'inside_vect', 'instantly_vect', 'instead_vect', 'instructions_vect', 'insurance_vect', 'intelligent_vect', 'interest_vect', 'interested_vect', 'interesting_vect', 'interflora_vect', 'internet_vect', 'intro_vect', 'invest_vect', 'invite_vect', 'invited_vect', 'inviting_vect', 'iouri_vect', 'ip4_vect', 'ipod_vect', 'irritating_vect', 'iscoming_vect', 'ish_vect', 'island_vect', 'islands_vect', 'isnt_vect', 'issue_vect', 'issues_vect', 'it_vect', 'italian_vect', 'its_vect', 'itz_vect', 'itåõs_vect', 'ive_vect', 'iz_vect', 'izzit_vect', 'iåõm_vect', 'jacket_vect', 'jamster_vect', 'jan_vect', 'january_vect', 'jason_vect', 'java_vect', 'jay_vect', 'jazz_vect', 'jealous_vect', 'jeans_vect', 'jen_vect', 'jenny_vect', 'jess_vect', 'jesus_vect', 'jiayin_vect', 'jiu_vect', 'jo_vect', 'joanna_vect', 'job_vect', 'jogging_vect', 'john_vect', 'join_vect', 'joined_vect', 'joining_vect', 'joke_vect', 'jokes_vect', 'jokin_vect', 'joking_vect', 'jolly_vect', 'joy_vect', 'jsco_vect', 'jst_vect', 'juan_vect', 'juicy_vect', 'july_vect', 'june_vect', 'jus_vect', 'juz_vect', 'k52_vect', 'kadeem_vect', 'kaiez_vect', 'kallis_vect', 'kano_vect', 'kappa_vect', 'karaoke_vect', 'kate_vect', 'kay_vect', 'kb_vect', 'ke_vect', 'keep_vect', 'keeping_vect', 'keeps_vect', 'kent_vect', 'kept_vect', 'kerala_vect', 'key_vect', 'keys_vect', 'ki_vect', 'kick_vect', 'kid_vect', 'kidding_vect', 'kids_vect', 'kidz_vect', 'kind_vect', 'kinda_vect', 'kindly_vect', 'king_vect', 'kiss_vect', 'kisses_vect', 'kk_vect', 'knackered_vect', 'knew_vect', 'knock_vect', 'know_vect', 'knowing_vect', 'knows_vect', 'knw_vect', 'kz_vect', 'l8r_vect', 'la_vect', 'lab_vect', 'ladies_vect', 'lady_vect', 'lag_vect', 'laid_vect', 'land_vect', 'landline_vect', 'langport_vect', 'language_vect', 'laptop_vect', 'lar_vect', 'largest_vect', 'last_vect', 'late_vect', 'later_vect', 'latest_vect', 'latr_vect', 'laugh_vect', 'laughing_vect', 'law_vect', 'lazy_vect', 'ldn_vect', 'ldnw15h_vect', 'le_vect', 'lead_vect', 'learn_vect', 'least_vect', 'leave_vect', 'leaves_vect', 'leaving_vect', 'lect_vect', 'lecture_vect', 'left_vect', 'legal_vect', 'legs_vect', 'leh_vect', 'lei_vect', 'lem_vect', 'less_vect', 
'lesson_vect', 'lessons_vect', 'let_vect', 'lets_vect', 'letter_vect', 'letters_vect', 'liao_vect', 'library_vect', 'lick_vect', 'licks_vect', 'lido_vect', 'lie_vect', 'lies_vect', 'life_vect', 'lifetime_vect', 'lift_vect', 'light_vect', 'lik_vect', 'like_vect', 'liked_vect', 'likely_vect', 'likes_vect', 'lil_vect', 'line_vect', 'linerental_vect', 'lines_vect', 'link_vect', 'lion_vect', 'lionm_vect', 'lionp_vect', 'lions_vect', 'lip_vect', 'list_vect', 'listen_vect', 'listening_vect', 'literally_vect', 'little_vect', 'live_vect', 'liverpool_vect', 'living_vect', 'lk_vect', 'll_vect', 'lmao_vect', 'lo_vect', 'loads_vect', 'loan_vect', 'loans_vect', 'local_vect', 'locations_vect', 'lock_vect', 'log_vect', 'login_vect', 'logo_vect', 'logopic_vect', 'lol_vect', 'london_vect', 'lonely_vect', 'long_vect', 'longer_vect', 'look_vect', 'lookatme_vect', 'looked_vect', 'lookin_vect', 'looking_vect', 'looks_vect', 'lor_vect', 'lose_vect', 'losing_vect', 'loss_vect', 'lost_vect', 'lot_vect', 'lotr_vect', 'lots_vect', 'lou_vect', 'loud_vect', 'lounge_vect', 'lousy_vect', 'lovable_vect', 'love_vect', 'loved_vect', 'lovely_vect', 'loveme_vect', 'lover_vect', 'loverboy_vect', 'lovers_vect', 'loves_vect', 'loving_vect', 'low_vect', 'lower_vect', 'loyal_vect', 'loyalty_vect', 'ls15hb_vect', 'lt_vect', 'ltd_vect', 'luck_vect', 'lucky_vect', 'lucy_vect', 'lunch_vect', 'luv_vect', 'lux_vect', 'luxury_vect', 'lyf_vect', 'lyfu_vect', 'lyk_vect', 'm227xy_vect', 'm26_vect', 'm263uz_vect', 'm8s_vect', 'mac_vect', 'machan_vect', 'macho_vect', 'mad_vect', 'madam_vect', 'made_vect', 'mag_vect', 'maga_vect', 'magical_vect', 'mah_vect', 'mail_vect', 'mailbox_vect', 'main_vect', 'maintain_vect', 'major_vect', 'make_vect', 'makes_vect', 'makin_vect', 'making_vect', 'malaria_vect', 'male_vect', 'mall_vect', 'man_vect', 'managed_vect', 'management_vect', 'many_vect', 'map_vect', 'march_vect', 'mark_vect', 'market_vect', 'marriage_vect', 'married_vect', 'marry_vect', 'masters_vect', 'match_vect', 'matches_vect', 'mate_vect', 'mates_vect', 'matrix3_vect', 'matter_vect', 'max10mins_vect', 'maximize_vect', 'maxå_vect', 'may_vect', 'mayb_vect', 'maybe_vect', 'mca_vect', 'mcat_vect', 'meal_vect', 'mean_vect', 'meaning_vect', 'means_vect', 'meant_vect', 'meanwhile_vect', 'med_vect', 'medical_vect', 'medicine_vect', 'meds_vect', 'meet_vect', 'meetin_vect', 'meeting_vect', 'meh_vect', 'mei_vect', 'member_vect', 'members_vect', 'men_vect', 'menu_vect', 'merry_vect', 'mess_vect', 'message_vect', 'messaged_vect', 'messages_vect', 'messaging_vect', 'met_vect', 'mid_vect', 'middle_vect', 'midnight_vect', 'mids_vect', 'might_vect', 'miles_vect', 'milk_vect', 'min_vect', 'mind_vect', 'mine_vect', 'mini_vect', 'minimum_vect', 'minor_vect', 'mins_vect', 'minute_vect', 'minutes_vect', 'minuts_vect', 'miracle_vect', 'mis_vect', 'miserable_vect', 'miss_vect', 'missed_vect', 'missin_vect', 'missing_vect', 'mistake_vect', 'mistakes_vect', 'mite_vect', 'mm_vect', 'mmm_vect', 'mmmm_vect', 'mmmmmm_vect', 'mnths_vect', 'mo_vect', 'moan_vect', 'mob_vect', 'mobile_vect', 'mobiles_vect', 'mobilesdirect_vect', 'mobilesvary_vect', 'mobileupd8_vect', 'mobno_vect', 'moby_vect', 'mode_vect', 'model_vect', 'module_vect', 'modules_vect', 'moji_vect', 'mojibiola_vect', 'mokka_vect', 'mom_vect', 'moment_vect', 'moments_vect', 'moms_vect', 'mon_vect', 'monday_vect', 'money_vect', 'monkeys_vect', 'mono_vect', 'month_vect', 'monthly_vect', 'months_vect', 'mood_vect', 'moon_vect', 'moral_vect', 'morn_vect', 'morning_vect', 'mostly_vect', 'mother_vect', 
'motorola_vect', 'mouth_vect', 'move_vect', 'moved_vect', 'movie_vect', 'movies_vect', 'moving_vect', 'mp3_vect', 'mr_vect', 'mrng_vect', 'mrt_vect', 'msg_vect', 'msgs_vect', 'mths_vect', 'mu_vect', 'much_vect', 'mum_vect', 'mummy_vect', 'murder_vect', 'murdered_vect', 'murderer_vect', 'music_vect', 'must_vect', 'muz_vect', 'na_vect', 'nag_vect', 'nah_vect', 'naked_vect', 'name_vect', 'name1_vect', 'name2_vect', 'named_vect', 'names_vect', 'nan_vect', 'nap_vect', 'nasdaq_vect', 'nat_vect', 'national_vect', 'natural_vect', 'nature_vect', 'naughty_vect', 'nb_vect', 'nd_vect', 'ne_vect', 'near_vect', 'nearly_vect', 'necessarily_vect', 'necklace_vect', 'ned_vect', 'need_vect', 'needed_vect', 'needs_vect', 'neither_vect', 'net_vect', 'netcollex_vect', 'network_vect', 'networks_vect', 'neva_vect', 'never_vect', 'new_vect', 'newest_vect', 'news_vect', 'next_vect', 'ni8_vect', 'nice_vect', 'nigeria_vect', 'night_vect', 'nights_vect', 'nimya_vect', 'nite_vect', 'no1_vect', 'nobody_vect', 'noe_vect', 'nokia_vect', 'nokias_vect', 'noline_vect', 'none_vect', 'noon_vect', 'nope_vect', 'norm_vect', 'norm150ptone_vect', 'normal_vect', 'normally_vect', 'northampton_vect', 'note_vect', 'nothin_vect', 'nothing_vect', 'notice_vect', 'noun_vect', 'nowadays_vect', 'nt_vect', 'ntt_vect', 'ntwk_vect', 'num_vect', 'number_vect', 'numbers_vect', 'nuther_vect', 'nvm_vect', 'nw_vect', 'nxt_vect', 'nyc_vect', 'nydc_vect', 'nyt_vect', 'o2_vect', 'obviously_vect', 'odi_vect', 'offer_vect', 'offers_vect', 'office_vect', 'official_vect', 'officially_vect', 'ofice_vect', 'often_vect', 'oh_vect', 'oi_vect', 'oic_vect', 'ok_vect', 'okay_vect', 'okey_vect', 'okie_vect', 'ola_vect', 'old_vect', 'omg_vect', 'omw_vect', 'one_vect', 'ones_vect', 'oni_vect', 'online_vect', 'onto_vect', 'onwards_vect', 'oooh_vect', 'oops_vect', 'open_vect', 'opening_vect', 'operator_vect', 'opinion_vect', 'opportunity_vect', 'opt_vect', 'option_vect', 'optout_vect', 'or2stoptxt_vect', 'orange_vect', 'oranges_vect', 'orchard_vect', 'order_vect', 'ordered_vect', 'oredi_vect', 'original_vect', 'oru_vect', 'os_vect', 'oso_vect', 'others_vect', 'otherwise_vect', 'otside_vect', 'outside_vect', 'outstanding_vect', 'outta_vect', 'ovulation_vect', 'oz_vect', 'pa_vect', 'pack_vect', 'package_vect', 'page_vect', 'pages_vect', 'paid_vect', 'pain_vect', 'painful_vect', 'painting_vect', 'panic_vect', 'paper_vect', 'papers_vect', 'paperwork_vect', 'parco_vect', 'parent_vect', 'parents_vect', 'paris_vect', 'park_vect', 'parked_vect', 'parking_vect', 'part_vect', 'partner_vect', 'partnership_vect', 'party_vect', 'pass_vect', 'passed_vect', 'password_vect', 'past_vect', 'pattern_vect', 'patty_vect', 'pay_vect', 'paying_vect', 'payment_vect', 'payoh_vect', 'pc_vect', 'peace_vect', 'pence_vect', 'people_vect', 'per_vect', 'perfect_vect', 'period_vect', 'person_vect', 'personal_vect', 'pete_vect', 'petey_vect', 'pg_vect', 'philosophy_vect', 'phoenix_vect', 'phone_vect', 'phones_vect', 'photo_vect', 'photos_vect', 'pic_vect', 'pick_vect', 'picked_vect', 'picking_vect', 'pics_vect', 'picsfree1_vect', 'picture_vect', 'pictures_vect', 'pie_vect', 'pieces_vect', 'pig_vect', 'pilates_vect', 'pin_vect', 'pink_vect', 'piss_vect', 'pissed_vect', 'pix_vect', 'pizza_vect', 'place_vect', 'places_vect', 'plan_vect', 'planned_vect', 'planning_vect', 'plans_vect', 'play_vect', 'played_vect', 'player_vect', 'players_vect', 'playing_vect', 'please_vect', 'pleased_vect', 'pleasure_vect', 'plenty_vect', 'pls_vect', 'plus_vect', 'plz_vect', 'pm_vect', 'po_vect', 'pobox_vect', 
'pobox334_vect', 'pobox36504w45wq_vect', 'pobox45w2tg150p_vect', 'pobox84_vect', 'pod_vect', 'point_vect', 'points_vect', 'poker_vect', 'pole_vect', 'police_vect', 'politicians_vect', 'poly_vect', 'polyphonic_vect', 'polys_vect', 'pongal_vect', 'poor_vect', 'pop_vect', 'popped_vect', 'porn_vect', 'possession_vect', 'possible_vect', 'post_vect', 'postcard_vect', 'postcode_vect', 'posted_vect', 'posts_vect', 'potential_vect', 'potter_vect', 'pound_vect', 'pounds_vect', 'pouts_vect', 'power_vect', 'ppl_vect', 'pple_vect', 'ppm_vect', 'prabha_vect', 'practice_vect', 'practicing_vect', 'pray_vect', 'prefer_vect', 'premier_vect', 'prepare_vect', 'prescription_vect', 'present_vect', 'press_vect', 'pretty_vect', 'prey_vect', 'price_vect', 'prince_vect', 'princess_vect', 'print_vect', 'privacy_vect', 'private_vect', 'prize_vect', 'prob_vect', 'probably_vect', 'problem_vect', 'problems_vect', 'process_vect', 'processed_vect', 'prof_vect', 'profit_vect', 'program_vect', 'project_vect', 'prolly_vect', 'promise_vect', 'promises_vect', 'promo_vect', 'proof_vect', 'properly_vect', 'prospects_vect', 'provided_vect', 'ps_vect', 'ptbo_vect', 'pub_vect', 'pull_vect', 'purchase_vect', 'purity_vect', 'purpose_vect', 'push_vect', 'pushes_vect', 'pussy_vect', 'put_vect', 'puttin_vect', 'putting_vect', 'qatar_vect', 'quality_vect', 'queen_vect', 'ques_vect', 'question_vect', 'questions_vect', 'quick_vect', 'quickly_vect', 'quiet_vect', 'quit_vect', 'quite_vect', 'quiz_vect', 'quote_vect', 'quoting_vect', 'racing_vect', 'radio_vect', 'railway_vect', 'rain_vect', 'raining_vect', 'raise_vect', 'rakhesh_vect', 'rally_vect', 'ran_vect', 'random_vect', 'randomly_vect', 'randy_vect', 'rang_vect', 'range_vect', 'ranjith_vect', 'rate_vect', 'rates_vect', 'rather_vect', 'rays_vect', 'rcvd_vect', 'rd_vect', 're_vect', 'reach_vect', 'reached_vect', 'reaching_vect', 'reaction_vect', 'read_vect', 'readers_vect', 'reading_vect', 'ready_vect', 'real_vect', 'realise_vect', 'reality_vect', 'realized_vect', 'really_vect', 'realy_vect', 'reason_vect', 'reasonable_vect', 'reasons_vect', 'reboot_vect', 'recd_vect', 'receipt_vect', 'receive_vect', 'received_vect', 'receiving_vect', 'recent_vect', 'recently_vect', 'recession_vect', 'record_vect', 'records_vect', 'recovery_vect', 'red_vect', 'ref_vect', 'reference_vect', 'reg_vect', 'regards_vect', 'register_vect', 'registered_vect', 'regret_vect', 'regular_vect', 'relation_vect', 'relax_vect', 'released_vect', 'rem_vect', 'remain_vect', 'remains_vect', 'remember_vect', 'remembered_vect', 'remembr_vect', 'remind_vect', 'reminder_vect', 'remove_vect', 'rent_vect', 'rental_vect', 'rentl_vect', 'repair_vect', 'repeat_vect', 'replied_vect', 'reply_vect', 'replying_vect', 'report_vect', 'representative_vect', 'request_vect', 'requests_vect', 'research_vect', 'resend_vect', 'respect_vect', 'respond_vect', 'responding_vect', 'response_vect', 'responsibility_vect', 'rest_vect', 'restaurant_vect', 'result_vect', 'results_vect', 'retrieve_vect', 'return_vect', 'returned_vect', 'returns_vect', 'reveal_vect', 'revealed_vect', 'review_vect', 'revision_vect', 'reward_vect', 'rhythm_vect', 'rice_vect', 'rich_vect', 'ride_vect', 'right_vect', 'rights_vect', 'ring_vect', 'ringtone_vect', 'ringtones_vect', 'rite_vect', 'river_vect', 'road_vect', 'roads_vect', 'roast_vect', 'rock_vect', 'rocks_vect', 'rofl_vect', 'roger_vect', 'role_vect', 'ron_vect', 'room_vect', 'roommate_vect', 'roommates_vect', 'rooms_vect', 'rose_vect', 'round_vect', 'row_vect', 'roww1j6hl_vect', 'roww1jhl_vect', 'royal_vect', 
'rply_vect', 'rs_vect', 'rstm_vect', 'ru_vect', 'rub_vect', 'rude_vect', 'rule_vect', 'run_vect', 'running_vect', 'runs_vect', 'rush_vect', 'sacrifice_vect', 'sad_vect', 'sae_vect', 'safe_vect', 'said_vect', 'salam_vect', 'salary_vect', 'sale_vect', 'salon_vect', 'sam_vect', 'santa_vect', 'sar_vect', 'sarasota_vect', 'sarcastic_vect', 'sary_vect', 'sat_vect', 'sathya_vect', 'saturday_vect', 'savamob_vect', 'save_vect', 'saved_vect', 'saw_vect', 'say_vect', 'saying_vect', 'says_vect', 'scared_vect', 'scary_vect', 'sch_vect', 'schedule_vect', 'school_vect', 'schools_vect', 'science_vect', 'scold_vect', 'score_vect', 'scores_vect', 'scotland_vect', 'scream_vect', 'screaming_vect', 'scrounge_vect', 'se_vect', 'sea_vect', 'search_vect', 'searching_vect', 'season_vect', 'seat_vect', 'sec_vect', 'second_vect', 'seconds_vect', 'secret_vect', 'secretary_vect', 'secs_vect', 'sed_vect', 'see_vect', 'seeing_vect', 'seem_vect', 'seemed_vect', 'seems_vect', 'seen_vect', 'selected_vect', 'selection_vect', 'self_vect', 'sell_vect', 'selling_vect', 'sells_vect', 'sem_vect', 'semester_vect', 'sen_vect', 'send_vect', 'sender_vect', 'sending_vect', 'sense_vect', 'sent_vect', 'sentence_vect', 'sept_vect', 'series_vect', 'serious_vect', 'seriously_vect', 'service_vect', 'services_vect', 'serving_vect', 'set_vect', 'setting_vect', 'settings_vect', 'settle_vect', 'settled_vect', 'seven_vect', 'several_vect', 'sex_vect', 'sexy_vect', 'sh_vect', 'sha_vect', 'shall_vect', 'share_vect', 'shd_vect', 'sheets_vect', 'shes_vect', 'shesil_vect', 'shining_vect', 'ship_vect', 'shipping_vect', 'shirt_vect', 'shirts_vect', 'shit_vect', 'shld_vect', 'shocking_vect', 'shoot_vect', 'shop_vect', 'shoppin_vect', 'shopping_vect', 'short_vect', 'shorter_vect', 'shortly_vect', 'shot_vect', 'shoving_vect', 'show_vect', 'shower_vect', 'showing_vect', 'shows_vect', 'shu_vect', 'shuhui_vect', 'shy_vect', 'si_vect', 'sick_vect', 'side_vect', 'sighs_vect', 'sight_vect', 'sign_vect', 'signing_vect', 'silence_vect', 'silent_vect', 'silver_vect', 'sim_vect', 'simple_vect', 'simply_vect', 'since_vect', 'sing_vect', 'singing_vect', 'single_vect', 'singles_vect', 'sipix_vect', 'sir_vect', 'sis_vect', 'sister_vect', 'sit_vect', 'site_vect', 'sitting_vect', 'situation_vect', 'siva_vect', 'six_vect', 'size_vect', 'sk3_vect', 'sk38xh_vect', 'skilgme_vect', 'skip_vect', 'sky_vect', 'skype_vect', 'skyped_vect', 'slap_vect', 'slave_vect', 'sleep_vect', 'sleepin_vect', 'sleeping_vect', 'sleepy_vect', 'slept_vect', 'slice_vect', 'slide_vect', 'slightly_vect', 'slip_vect', 'slippers_vect', 'slo_vect', 'slots_vect', 'slow_vect', 'slowly_vect', 'small_vect', 'smashed_vect', 'smile_vect', 'smiles_vect', 'smiling_vect', 'smoke_vect', 'smoking_vect', 'sms_vect', 'smth_vect', 'sn_vect', 'snake_vect', 'snow_vect', 'social_vect', 'sofa_vect', 'soft_vect', 'software_vect', 'sol_vect', 'some1_vect', 'somebody_vect', 'someone_vect', 'somethin_vect', 'something_vect', 'sometimes_vect', 'somewhere_vect', 'song_vect', 'songs_vect', 'sony_vect', 'sonyericsson_vect', 'soon_vect', 'sooner_vect', 'sore_vect', 'sorry_vect', 'sort_vect', 'sorting_vect', 'sound_vect', 'sounds_vect', 'south_vect', 'sp_vect', 'space_vect', 'spanish_vect', 'speak_vect', 'speaking_vect', 'special_vect', 'specialcall_vect', 'specially_vect', 'speed_vect', 'spend_vect', 'spending_vect', 'spent_vect', 'spk_vect', 'spoke_vect', 'spoken_vect', 'spook_vect', 'sport_vect', 'sports_vect', 'spree_vect', 'spring_vect', 'sptv_vect', 'sry_vect', 'st_vect', 'staff_vect', 'stamps_vect', 'stand_vect', 
'standard_vect', 'standing_vect', 'star_vect', 'staring_vect', 'start_vect', 'started_vect', 'starting_vect', 'starts_vect', 'starwars3_vect', 'statement_vect', 'station_vect', 'stay_vect', 'stayed_vect', 'staying_vect', 'std_vect', 'steam_vect', 'step_vect', 'steve_vect', 'stick_vect', 'sticky_vect', 'still_vect', 'stock_vect', 'stockport_vect', 'stomach_vect', 'stomps_vect', 'stones_vect', 'stop_vect', 'stopped_vect', 'stops_vect', 'store_vect', 'stores_vect', 'story_vect', 'str_vect', 'straight_vect', 'stranger_vect', 'street_vect', 'stress_vect', 'strike_vect', 'strong_vect', 'strongbuy_vect', 'stuck_vect', 'student_vect', 'study_vect', 'studying_vect', 'stuff_vect', 'stupid_vect', 'style_vect', 'stylish_vect', 'sub_vect', 'subpoly_vect', 'subs_vect', 'subscribe6gbpmnth_vect', 'subscribed_vect', 'subscriber_vect', 'subscription_vect', 'success_vect', 'successful_vect', 'successfully_vect', 'sucks_vect', 'sue_vect', 'sufficient_vect', 'suggest_vect', 'suite_vect', 'suits_vect', 'sum1_vect', 'summer_vect', 'sun_vect', 'sunday_vect', 'sunlight_vect', 'sunny_vect', 'sunshine_vect', 'suntec_vect', 'sup_vect', 'super_vect', 'superb_vect', 'superior_vect', 'supervisor_vect', 'supply_vect', 'support_vect', 'suppose_vect', 'supposed_vect', 'suprman_vect', 'sura_vect', 'sure_vect', 'surely_vect', 'surfing_vect', 'surprise_vect', 'surprised_vect', 'survey_vect', 'sux_vect', 'suzy_vect', 'sw7_vect', 'sw73ss_vect', 'sweet_vect', 'swing_vect', 'system_vect', 'ta_vect', 'tablets_vect', 'tahan_vect', 'take_vect', 'taken_vect', 'takes_vect', 'takin_vect', 'taking_vect', 'talent_vect', 'talk_vect', 'talking_vect', 'tampa_vect', 'tape_vect', 'tariffs_vect', 'tat_vect', 'taunton_vect', 'taylor_vect', 'tb_vect', 'tc_vect', 'tcrw1_vect', 'tcs_vect', 'tea_vect', 'teach_vect', 'teacher_vect', 'teaches_vect', 'team_vect', 'tear_vect', 'tease_vect', 'teasing_vect', 'tech_vect', 'technical_vect', 'tee_vect', 'teeth_vect', 'tel_vect', 'telephone_vect', 'tell_vect', 'telling_vect', 'tells_vect', 'telugu_vect', 'temple_vect', 'ten_vect', 'tenants_vect', 'tenerife_vect', 'tension_vect', 'term_vect', 'terms_vect', 'terrible_vect', 'test_vect', 'testing_vect', 'text_vect', 'texted_vect', 'texting_vect', 'textoperator_vect', 'texts_vect', 'th_vect', 'thangam_vect', 'thank_vect', 'thanks_vect', 'thanksgiving_vect', 'thanx_vect', 'that_vect', 'thats_vect', 'thatåõs_vect', 'the_vect', 'theatre_vect', 'themob_vect', 'theory_vect', 'thesis_vect', 'thgt_vect', 'thing_vect', 'things_vect', 'think_vect', 'thinkin_vect', 'thinking_vect', 'thinks_vect', 'thk_vect', 'thnk_vect', 'tho_vect', 'though_vect', 'thought_vect', 'three_vect', 'throat_vect', 'throw_vect', 'thru_vect', 'tht_vect', 'thts_vect', 'thurs_vect', 'thursday_vect', 'tick_vect', 'ticket_vect', 'tickets_vect', 'tight_vect', 'tihs_vect', 'til_vect', 'till_vect', 'time_vect', 'times_vect', 'timing_vect', 'tired_vect', 'tirunelvali_vect', 'tirupur_vect', 'tissco_vect', 'tkts_vect', 'tm_vect', 'tming_vect', 'tmobile_vect', 'tmr_vect', 'tncs_vect', 'toa_vect', 'toclaim_vect', 'today_vect', 'todays_vect', 'tog_vect', 'together_vect', 'tok_vect', 'told_vect', 'tomarrow_vect', 'tomo_vect', 'tomorrow_vect', 'tone_vect', 'tones_vect', 'tones2youcouk_vect', 'tonight_vect', 'tonite_vect', 'took_vect', 'tool_vect', 'tooo_vect', 'toot_vect', 'top_vect', 'topic_vect', 'torch_vect', 'toshiba_vect', 'tot_vect', 'total_vect', 'totally_vect', 'touch_vect', 'tough_vect', 'tour_vect', 'towards_vect', 'town_vect', 'track_vect', 'trade_vect', 'traffic_vect', 'train_vect', 'training_vect', 
'transaction_vect', 'transfer_vect', 'transport_vect', 'travel_vect', 'treat_vect', 'treated_vect', 'tried_vect', 'trip_vect', 'trips_vect', 'trouble_vect', 'true_vect', 'truffles_vect', 'truly_vect', 'trust_vect', 'truth_vect', 'try_vect', 'trying_vect', 'ts_vect', 'tscs_vect', 'tscs087147403231winawk_vect', 'tt_vect', 'ttyl_vect', 'tues_vect', 'tuesday_vect', 'tuition_vect', 'turn_vect', 'turning_vect', 'turns_vect', 'tv_vect', 'twelve_vect', 'twice_vect', 'two_vect', 'txt_vect', 'txtauction_vect', 'txtin_vect', 'txting_vect', 'txtno_vect', 'txts_vect', 'txtstop_vect', 'tyler_vect', 'type_vect', 'tyrone_vect', 'u4_vect', 'ubi_vect', 'ufind_vect', 'ugh_vect', 'uh_vect', 'uk_vect', 'uks_vect', 'ultimatum_vect', 'umma_vect', 'unable_vect', 'uncle_vect', 'understand_vect', 'understanding_vect', 'understood_vect', 'underwear_vect', 'unemployed_vect', 'uni_vect', 'unique_vect', 'university_vect', 'unless_vect', 'unlimited_vect', 'unnecessarily_vect', 'unredeemed_vect', 'unsold_vect', 'unsub_vect', 'unsubscribe_vect', 'upd8_vect', 'update_vect', 'updatenow_vect', 'upgrade_vect', 'upload_vect', 'upset_vect', 'upstairs_vect', 'ur_vect', 'ure_vect', 'urgent_vect', 'urgnt_vect', 'url_vect', 'urn_vect', 'urself_vect', 'us_vect', 'usb_vect', 'use_vect', 'used_vect', 'user_vect', 'usf_vect', 'using_vect', 'usual_vect', 'usually_vect', 'vale_vect', 'valentine_vect', 'valentines_vect', 'valid_vect', 'valid12hrs_vect', 'valuable_vect', 'value_vect', 'valued_vect', 'vary_vect', 've_vect', 'vegas_vect', 'verify_vect', 'version_vect', 'via_vect', 'vid_vect', 'video_vect', 'videochat_vect', 'videophones_vect', 'vijay_vect', 'vikky_vect', 'village_vect', 'violated_vect', 'violence_vect', 'vip_vect', 'virgin_vect', 'visit_vect', 'vivek_vect', 'vl_vect', 'voda_vect', 'vodafone_vect', 'vodka_vect', 'voice_vect', 'voicemail_vect', 'vomit_vect', 'vote_vect', 'voucher_vect', 'vouchers_vect', 'vry_vect', 'vth_vect', 'w45wq_vect', 'wa_vect', 'wah_vect', 'wait_vect', 'waited_vect', 'waitin_vect', 'waiting_vect', 'wake_vect', 'waking_vect', 'wales_vect', 'walk_vect', 'walked_vect', 'walking_vect', 'walmart_vect', 'wan_vect', 'wana_vect', 'want_vect', 'wanted_vect', 'wanting_vect', 'wants_vect', 'wap_vect', 'warm_vect', 'warner_vect', 'waste_vect', 'wasted_vect', 'wat_vect', 'watch_vect', 'watching_vect', 'water_vect', 'wats_vect', 'way_vect', 'wc1n3xx_vect', 'we_vect', 'weak_vect', 'wear_vect', 'wearing_vect', 'weather_vect', 'web_vect', 'website_vect', 'wed_vect', 'wedding_vect', 'wednesday_vect', 'wee_vect', 'weed_vect', 'week_vect', 'weekend_vect', 'weekends_vect', 'weekly_vect', 'weeks_vect', 'weigh_vect', 'weight_vect', 'weird_vect', 'welcome_vect', 'well_vect', 'welp_vect', 'wen_vect', 'went_vect', 'west_vect', 'wet_vect', 'what_vect', 'whatever_vect', 'whats_vect', 'whenever_vect', 'whenevr_vect', 'wherever_vect', 'whether_vect', 'white_vect', 'whn_vect', 'whole_vect', 'whos_vect', 'whose_vect', 'wid_vect', 'widelivecomindex_vect', 'wif_vect', 'wife_vect', 'wil_vect', 'willing_vect', 'win_vect', 'wind_vect', 'wine_vect', 'winner_vect', 'winning_vect', 'wins_vect', 'wipro_vect', 'wisdom_vect', 'wise_vect', 'wish_vect', 'wishes_vect', 'wishing_vect', 'wit_vect', 'within_vect', 'without_vect', 'wiv_vect', 'wk_vect', 'wkend_vect', 'wkg_vect', 'wkly_vect', 'wks_vect', 'wld_vect', 'wml_vect', 'wn_vect', 'wnt_vect', 'wo_vect', 'woke_vect', 'woken_vect', 'woman_vect', 'women_vect', 'wonder_vect', 'wonderful_vect', 'wondering_vect', 'wont_vect', 'woot_vect', 'word_vect', 'words_vect', 'work_vect', 'workin_vect', 
'working_vect', 'works_vect', 'world_vect', 'worried_vect', 'worries_vect', 'worry_vect', 'worse_vect', 'worst_vect', 'worth_vect', 'wot_vect', 'would_vect', 'wow_vect', 'write_vect', 'wrong_vect', 'wtf_vect', 'wud_vect', 'wuld_vect', 'wun_vect', 'www4tcbiz_vect', 'wwwcomuknet_vect', 'wwwetlpcoukexpressoffer_vect', 'wwwgetzedcouk_vect', 'wwwldewcom_vect', 'wwwldewcom1win150ppmx3age16_vect', 'wwwmovietriviatv_vect', 'wwwringtonescouk_vect', 'wwwsmsconet_vect', 'wwwtxttowincouk_vect', 'wwwurawinnercom_vect', 'wylie_vect', 'xchat_vect', 'xmas_vect', 'xuhui_vect', 'xx_vect', 'xxx_vect', 'xxxx_vect', 'xxxxx_vect', 'xy_vect', 'ya_vect', 'yahoo_vect', 'yan_vect', 'yar_vect', 'yay_vect', 'yck_vect', 'yeah_vect', 'year_vect', 'years_vect', 'yelling_vect', 'yellow_vect', 'yep_vect', 'yes_vect', 'yest_vect', 'yesterday_vect', 'yet_vect', 'yetunde_vect', 'yijue_vect', 'ym_vect', 'yo_vect', 'yoga_vect', 'yogasana_vect', 'yor_vect', 'you_vect', 'yr_vect', 'yrs_vect', 'yummy_vect', 'yun_vect', 'yuo_vect', 'yup_vect', 'zed_vect', 'zindgi_vect', 'ìï_vect', 'ûò_vect'], 'input_features_type': {'v2': 'O'}, 'word2num_features': [], 'unpreprocessed_columns': [], 'force_numeric_conv': [], 'conversion_method': 'TF_IDF'}, 'selector': {'reducer': False, 'reducer_file': '', 'input_features': ['v2'], 'output_features': ['07xxxxxxxxx_vect', '08700621170150p_vect', '08702840625comuk_vect', '08718726270150gbpmtmsg18_vect', '1000s_vect', '10am7pm_vect', '10k_vect', '10p_vect', '10pmin_vect', '10ppm_vect', '11mths_vect', '125gift_vect', '12hrs_vect', '12mths_vect', '150p_vect', '150perwksub_vect', '150pm_vect', '150pmin_vect', '150pmsg_vect', '150pmsgrcvdhgsuite3422landsroww1j6hl_vect', '150pmtmsgrcvd18_vect', '150ppm_vect', '150ptone_vect', '150pwk_vect', '150week_vect', '16only_vect', '18only_vect', '1hr_vect', '1minmobsmorelkpobox177hp51fl_vect', '1st_vect', '1x150pwk_vect', '20p_vect', '20pmin_vect', '21st_vect', '220cm2_vect', '24hrs_vect', '25p_vect', '26th_vect', '2day_vect', '2find_vect', '2geva_vect', '2go_vect', '2marrow_vect', '2mrw_vect', '2nd_vect', '2nite_vect', '2optout_vect', '2p_vect', '2u_vect', '2waxsto_vect', '2wks_vect', '300p_vect', '31pmsg_vect', '3510i_vect', '3d_vect', '3g_vect', '3gbp_vect', '3hrs_vect', '3mins_vect', '3qxj9_vect', '3rd_vect', '3ss_vect', '3u_vect', '3uz_vect', '3wk_vect', '40gb_vect', '4a_vect', '4d_vect', '4eva_vect', '4get_vect', '4info_vect', '4mths_vect', '4th_vect', '4u_vect', '50p_vect', '5min_vect', '5pm_vect', '5wb_vect', '5we_vect', '60pmin_vect', '6hrs_vect', '6months_vect', '6pm_vect', '7250i_vect', '7ish_vect', '8am_vect', '8pm_vect', '8th_vect', '8wp_vect', '9ae_vect', '9ja_vect', '9pm_vect', '9t_vect', 'aathi_vect', 'abi_vect', 'ability_vect', 'abiola_vect', 'able_vect', 'abt_vect', 'abta_vect', 'aburo_vect', 'ac_vect', 'academic_vect', 'acc_vect', 'accept_vect', 'access_vect', 'accident_vect', 'accidentally_vect', 'accordingly_vect', 'account_vect', 'ache_vect', 'across_vect', 'acted_vect', 'action_vect', 'activate_vect', 'activities_vect', 'actor_vect', 'actual_vect', 'actually_vect', 'ad_vect', 'adam_vect', 'add_vect', 'added_vect', 'addicted_vect', 'addie_vect', 'address_vect', 'admin_vect', 'administrator_vect', 'admirer_vect', 'admit_vect', 'adore_vect', 'adoring_vect', 'ads_vect', 'adult_vect', 'advance_vect', 'adventure_vect', 'advice_vect', 'advise_vect', 'affair_vect', 'affairs_vect', 'affectionate_vect', 'afraid_vect', 'aft_vect', 'afternoon_vect', 'aftr_vect', 'agalla_vect', 'age_vect', 'age16_vect', 'ages_vect', 'ago_vect', 'agree_vect', 'ah_vect', 
'aha_vect', 'ahead_vect', 'ahmad_vect', 'ai_vect', 'aight_vect', 'aint_vect', 'air_vect', 'airport_vect', 'airtel_vect', 'aiya_vect', 'aiyah_vect', 'aiyar_vect', 'aiyo_vect', 'al_vect', 'album_vect', 'alert_vect', 'alex_vect', 'alfie_vect', 'ali_vect', 'allah_vect', 'allow_vect', 'allowed_vect', 'almost_vect', 'alone_vect', 'along_vect', 'already_vect', 'alright_vect', 'alrite_vect', 'also_vect', 'always_vect', 'alwys_vect', 'amazing_vect', 'american_vect', 'among_vect', 'amount_vect', 'amp_vect', 'amt_vect', 'andros_vect', 'angry_vect', 'annie_vect', 'anniversary_vect', 'announcement_vect', 'anot_vect', 'another_vect', 'ans_vect', 'ansr_vect', 'answer_vect', 'answered_vect', 'answering_vect', 'answers_vect', 'anthony_vect', 'anti_vect', 'anybody_vect', 'anymore_vect', 'anyone_vect', 'anything_vect', 'anytime_vect', 'anyway_vect', 'anyways_vect', 'apartment_vect', 'app_vect', 'apparently_vect', 'applebees_vect', 'apply_vect', 'appointment_vect', 'appreciate_vect', 'appreciated_vect', 'approx_vect', 'apps_vect', 'appt_vect', 'april_vect', 'ar_vect', 'arcade_vect', 'ard_vect', 'area_vect', 'argh_vect', 'argument_vect', 'arm_vect', 'armand_vect', 'arms_vect', 'around_vect', 'arrange_vect', 'arrested_vect', 'arrive_vect', 'arsenal_vect', 'art_vect', 'arun_vect', 'asap_vect', 'ashley_vect', 'ask_vect', 'askd_vect', 'asked_vect', 'askin_vect', 'asking_vect', 'asks_vect', 'asleep_vect', 'ass_vect', 'assume_vect', 'ate_vect', 'atlanta_vect', 'atlast_vect', 'atm_vect', 'attached_vect', 'attempt_vect', 'attend_vect', 'auction_vect', 'august_vect', 'aunt_vect', 'aunty_vect', 'auto_vect', 'av_vect', 'available_vect', 'avatar_vect', 'ave_vect', 'avent_vect', 'avoid_vect', 'await_vect', 'awaiting_vect', 'awake_vect', 'award_vect', 'awarded_vect', 'away_vect', 'awesome_vect', 'aww_vect', 'b4_vect', 'ba_vect', 'babe_vect', 'babes_vect', 'babies_vect', 'baby_vect', 'back_vect', 'bad_vect', 'bag_vect', 'bags_vect', 'bahamas_vect', 'bak_vect', 'balance_vect', 'bank_vect', 'banks_vect', 'bar_vect', 'barely_vect', 'basic_vect', 'basically_vect', 'bat_vect', 'bath_vect', 'bathe_vect', 'bathing_vect', 'battery_vect', 'bay_vect', 'bb_vect', 'bc_vect', 'bck_vect', 'bcoz_vect', 'bday_vect', 'be_vect', 'bears_vect', 'beautiful_vect', 'beauty_vect', 'bec_vect', 'become_vect', 'becoz_vect', 'bed_vect', 'bedrm_vect', 'bedroom_vect', 'beer_vect', 'befor_vect', 'beg_vect', 'begin_vect', 'behave_vect', 'behind_vect', 'bein_vect', 'believe_vect', 'bell_vect', 'belly_vect', 'belovd_vect', 'best_vect', 'bet_vect', 'better_vect', 'beyond_vect', 'bf_vect', 'bid_vect', 'bids_vect', 'big_vect', 'bigger_vect', 'biggest_vect', 'bill_vect', 'billed_vect', 'billion_vect', 'bills_vect', 'bin_vect', 'biola_vect', 'birds_vect', 'birla_vect', 'birth_vect', 'birthdate_vect', 'birthday_vect', 'bishan_vect', 'bit_vect', 'bitch_vect', 'bite_vect', 'black_vect', 'blackberry_vect', 'blah_vect', 'blake_vect', 'blank_vect', 'bleh_vect', 'bless_vect', 'blessing_vect', 'bloo_vect', 'blood_vect', 'bloody_vect', 'blue_vect', 'bluetooth_vect', 'bluff_vect', 'boat_vect', 'body_vect', 'bold_vect', 'bone_vect', 'bonus_vect', 'boo_vect', 'book_vect', 'booked_vect', 'booking_vect', 'books_vect', 'boost_vect', 'booty_vect', 'bored_vect', 'boring_vect', 'born_vect', 'boss_vect', 'boston_vect', 'bother_vect', 'bottom_vect', 'bought_vect', 'bout_vect', 'bowl_vect', 'box_vect', 'box326_vect', 'box334sk38ch_vect', 'box97n7qp_vect', 'boy_vect', 'boye_vect', 'boyfriend_vect', 'boys_vect', 'boytoy_vect', 'brah_vect', 'brand_vect', 'bread_vect', 'break_vect', 
'breathe_vect', 'bright_vect', 'brilliant_vect', 'bring_vect', 'bringing_vect', 'brings_vect', 'british_vect', 'bro_vect', 'broad_vect', 'broke_vect', 'broken_vect', 'bros_vect', 'brothas_vect', 'brother_vect', 'brought_vect', 'bruv_vect', 'bslvyl_vect', 'bt_vect', 'btnationalrate_vect', 'btw_vect', 'bucks_vect', 'bud_vect', 'budget_vect', 'buff_vect', 'buffet_vect', 'bugis_vect', 'building_vect', 'buns_vect', 'burger_vect', 'burns_vect', 'bus_vect', 'buses_vect', 'business_vect', 'busy_vect', 'butt_vect', 'buy_vect', 'buying_vect', 'buzz_vect', 'bx420_vect', 'bx420ip45we_vect', 'bye_vect', 'ca_vect', 'cabin_vect', 'cafe_vect', 'cake_vect', 'cal_vect', 'calculation_vect', 'calicut_vect', 'california_vect', 'call_vect', 'call2optout674_vect', 'callback_vect', 'callcost_vect', 'called_vect', 'caller_vect', 'callers_vect', 'callertune_vect', 'callin_vect', 'calling_vect', 'calls_vect', 'callså_vect', 'cam_vect', 'camcorder_vect', 'came_vect', 'camera_vect', 'cameravideo_vect', 'campus_vect', 'can_vect', 'canada_vect', 'canal_vect', 'canary_vect', 'cancel_vect', 'cancelled_vect', 'cancer_vect', 'cant_vect', 'captain_vect', 'car_vect', 'card_vect', 'cardiff_vect', 'care_vect', 'cared_vect', 'career_vect', 'careful_vect', 'carefully_vect', 'caring_vect', 'carlos_vect', 'caroline_vect', 'cars_vect', 'cartoon_vect', 'case_vect', 'cash_vect', 'cashbalance_vect', 'cashin_vect', 'castor_vect', 'cat_vect', 'catch_vect', 'catching_vect', 'caught_vect', 'cause_vect', 'cbe_vect', 'cc_vect', 'cd_vect', 'cdgt_vect', 'cds_vect', 'celebrate_vect', 'celebration_vect', 'cell_vect', 'center_vect', 'centre_vect', 'certainly_vect', 'cha_vect', 'chain_vect', 'challenge_vect', 'chance_vect', 'change_vect', 'changed_vect', 'changes_vect', 'channel_vect', 'character_vect', 'charge_vect', 'charged_vect', 'charges_vect', 'charity_vect', 'charles_vect', 'chase_vect', 'chasing_vect', 'chat_vect', 'chatting_vect', 'cheap_vect', 'cheaper_vect', 'cheat_vect', 'chechi_vect', 'check_vect', 'checked_vect', 'checking_vect', 'cheers_vect', 'chennai_vect', 'cherish_vect', 'chest_vect', 'chicken_vect', 'chikku_vect', 'child_vect', 'childish_vect', 'children_vect', 'chill_vect', 'chillin_vect', 'china_vect', 'chinese_vect', 'chip_vect', 'chocolate_vect', 'choice_vect', 'choose_vect', 'chosen_vect', 'christ_vect', 'christmas_vect', 'church_vect', 'cine_vect', 'cinema_vect', 'citizen_vect', 'city_vect', 'claim_vect', 'claims_vect', 'claire_vect', 'class_vect', 'classes_vect', 'clean_vect', 'cleaning_vect', 'clear_vect', 'clearly_vect', 'click_vect', 'clock_vect', 'close_vect', 'closed_vect', 'closer_vect', 'closes_vect', 'clothes_vect', 'club_vect', 'cn_vect', 'co_vect', 'coast_vect', 'coat_vect', 'cochin_vect', 'code_vect', 'coffee_vect', 'coin_vect', 'coins_vect', 'cold_vect', 'colleagues_vect', 'collect_vect', 'collected_vect', 'collecting_vect', 'collection_vect', 'college_vect', 'colour_vect', 'come_vect', 'comedy_vect', 'comes_vect', 'comin_vect', 'coming_vect', 'commercial_vect', 'common_vect', 'community_vect', 'comp_vect', 'company_vect', 'competition_vect', 'complete_vect', 'completed_vect', 'completely_vect', 'complimentary_vect', 'computer_vect', 'concentrate_vect', 'concert_vect', 'conditions_vect', 'conducts_vect', 'confidence_vect', 'confirm_vect', 'congrats_vect', 'congratulations_vect', 'connection_vect', 'consider_vect', 'considering_vect', 'constant_vect', 'constantly_vect', 'contact_vect', 'contacted_vect', 'contacts_vect', 'content_vect', 'contents_vect', 'continue_vect', 'contract_vect', 'control_vect', 
'convey_vect', 'convinced_vect', 'cool_vect', 'coping_vect', 'copy_vect', 'cornwall_vect', 'correct_vect', 'cos_vect', 'cost_vect', 'costa_vect', 'costs_vect', 'costå_vect', 'could_vect', 'count_vect', 'countin_vect', 'country_vect', 'couple_vect', 'course_vect', 'cover_vect', 'coz_vect', 'cr9_vect', 'cramps_vect', 'crave_vect', 'crazy_vect', 'created_vect', 'credit_vect', 'credited_vect', 'credits_vect', 'creepy_vect', 'crisis_vect', 'crore_vect', 'cross_vect', 'croydon_vect', 'cruise_vect', 'cry_vect', 'cs_vect', 'csbcm4235wc1n3xx_vect', 'csstop_vect', 'cud_vect', 'cuddling_vect', 'cum_vect', 'cup_vect', 'curious_vect', 'current_vect', 'currently_vect', 'cust_vect', 'custcare_vect', 'custcare08718720201_vect', 'custom_vect', 'customer_vect', 'customers_vect', 'cut_vect', 'cute_vect', 'cutting_vect', 'cuz_vect', 'cw25wx_vect', 'da_vect', 'dad_vect', 'daddy_vect', 'daily_vect', 'damn_vect', 'dance_vect', 'dancing_vect', 'dare_vect', 'dark_vect', 'darlin_vect', 'darling_vect', 'darren_vect', 'dat_vect', 'date_vect', 'dates_vect', 'dating_vect', 'dave_vect', 'day_vect', 'days_vect', 'de_vect', 'dead_vect', 'deal_vect', 'dealer_vect', 'dealing_vect', 'dear_vect', 'dearly_vect', 'death_vect', 'decide_vect', 'decided_vect', 'decimal_vect', 'decision_vect', 'deep_vect', 'def_vect', 'definite_vect', 'definitely_vect', 'del_vect', 'delete_vect', 'deleted_vect', 'delhi_vect', 'deliver_vect', 'delivered_vect', 'deliveredtomorrow_vect', 'delivery_vect', 'dem_vect', 'demand_vect', 'den_vect', 'denis_vect', 'department_vect', 'depends_vect', 'depressed_vect', 'derek_vect', 'desires_vect', 'desperate_vect', 'details_vect', 'dey_vect', 'dhoni_vect', 'dial_vect', 'dick_vect', 'dictionary_vect', 'didn_vect', 'didnt_vect', 'didt_vect', 'die_vect', 'died_vect', 'diet_vect', 'different_vect', 'difficult_vect', 'digital_vect', 'dignity_vect', 'din_vect', 'dinner_vect', 'dint_vect', 'direct_vect', 'directly_vect', 'dirty_vect', 'dis_vect', 'discount_vect', 'discuss_vect', 'dislikes_vect', 'display_vect', 'distance_vect', 'distract_vect', 'disturb_vect', 'division_vect', 'dload_vect', 'dnt_vect', 'doc_vect', 'docs_vect', 'doctor_vect', 'doesnt_vect', 'dog_vect', 'dogging_vect', 'doggy_vect', 'doin_vect', 'dollars_vect', 'don_vect', 'done_vect', 'dont_vect', 'donåõt_vect', 'door_vect', 'dorm_vect', 'double_vect', 'dough_vect', 'download_vect', 'downloads_vect', 'draw_vect', 'dream_vect', 'dreams_vect', 'dress_vect', 'dressed_vect', 'dresser_vect', 'drink_vect', 'drinking_vect', 'drinks_vect', 'drive_vect', 'driver_vect', 'drivin_vect', 'driving_vect', 'drop_vect', 'dropped_vect', 'drug_vect', 'drugs_vect', 'drunk_vect', 'dry_vect', 'ds_vect', 'dubsack_vect', 'dude_vect', 'due_vect', 'dun_vect', 'dunno_vect', 'durban_vect', 'dvd_vect', 'earlier_vect', 'early_vect', 'earth_vect', 'easier_vect', 'easily_vect', 'east_vect', 'easter_vect', 'easy_vect', 'eat_vect', 'eaten_vect', 'eatin_vect', 'eating_vect', 'ebay_vect', 'ec2a_vect', 'ee_vect', 'eek_vect', 'eerie_vect', 'effects_vect', 'eg_vect', 'egg_vect', 'eggs_vect', 'eh_vect', 'eight_vect', 'either_vect', 'ela_vect', 'electricity_vect', 'else_vect', 'elsewhere_vect', 'em_vect', 'email_vect', 'embarassed_vect', 'empty_vect', 'end_vect', 'ended_vect', 'ending_vect', 'ends_vect', 'enemy_vect', 'energy_vect', 'eng_vect', 'engin_vect', 'england_vect', 'english_vect', 'enjoy_vect', 'enjoyed_vect', 'enough_vect', 'enter_vect', 'entered_vect', 'entitled_vect', 'entry_vect', 'enuff_vect', 'envelope_vect', 'er_vect', 'erm_vect', 'escape_vect', 'especially_vect', 
'esplanade_vect', 'eta_vect', 'etc_vect', 'euro_vect', 'euro2004_vect', 'europe_vect', 'eve_vect', 'eveb_vect', 'even_vect', 'evening_vect', 'event_vect', 'ever_vect', 'every_vect', 'everybody_vect', 'everyday_vect', 'everyone_vect', 'everything_vect', 'everywhere_vect', 'evn_vect', 'evng_vect', 'ex_vect', 'exact_vect', 'exactly_vect', 'exam_vect', 'exams_vect', 'excellent_vect', 'except_vect', 'exciting_vect', 'excuse_vect', 'excuses_vect', 'executive_vect', 'exeter_vect', 'exhausted_vect', 'expect_vect', 'expecting_vect', 'expensive_vect', 'experience_vect', 'expired_vect', 'expires_vect', 'explain_vect', 'explicit_vect', 'explosive_vect', 'express_vect', 'extra_vect', 'eye_vect', 'eyes_vect', 'fa_vect', 'fab_vect', 'face_vect', 'facebook_vect', 'fact_vect', 'faggy_vect', 'failed_vect', 'fair_vect', 'faith_vect', 'fall_vect', 'falls_vect', 'family_vect', 'fan_vect', 'fancies_vect', 'fancy_vect', 'fantasies_vect', 'fantastic_vect', 'fantasy_vect', 'far_vect', 'farm_vect', 'fast_vect', 'faster_vect', 'fat_vect', 'father_vect', 'fathima_vect', 'fault_vect', 'fave_vect', 'favorite_vect', 'favour_vect', 'favourite_vect', 'fb_vect', 'feb_vect', 'february_vect', 'feel_vect', 'feelin_vect', 'feeling_vect', 'feels_vect', 'fees_vect', 'feet_vect', 'fell_vect', 'felt_vect', 'fetch_vect', 'fever_vect', 'field_vect', 'fifteen_vect', 'fight_vect', 'fighting_vect', 'figure_vect', 'file_vect', 'files_vect', 'fill_vect', 'filling_vect', 'fills_vect', 'film_vect', 'final_vect', 'finally_vect', 'find_vect', 'fine_vect', 'fingers_vect', 'finish_vect', 'finished_vect', 'first_vect', 'fit_vect', 'fix_vect', 'fixed_vect', 'flag_vect', 'flaked_vect', 'flash_vect', 'flat_vect', 'flight_vect', 'flights_vect', 'flirt_vect', 'floor_vect', 'flower_vect', 'fml_vect', 'fo_vect', 'follow_vect', 'followed_vect', 'following_vect', 'fone_vect', 'food_vect', 'fool_vect', 'football_vect', 'force_vect', 'foreign_vect', 'forever_vect', 'forevr_vect', 'forget_vect', 'forgets_vect', 'forgiven_vect', 'forgot_vect', 'format_vect', 'forums_vect', 'forward_vect', 'forwarded_vect', 'found_vect', 'four_vect', 'fr_vect', 'fran_vect', 'freak_vect', 'free_vect', 'freefone_vect', 'freemsg_vect', 'freephone_vect', 'freezing_vect', 'fren_vect', 'frens_vect', 'fret_vect', 'fri_vect', 'friday_vect', 'friend_vect', 'friends_vect', 'friendship_vect', 'fringe_vect', 'frm_vect', 'frnd_vect', 'frnds_vect', 'frndship_vect', 'fuck_vect', 'fuckin_vect', 'fucking_vect', 'ful_vect', 'full_vect', 'fullonsmscom_vect', 'fun_vect', 'funny_vect', 'future_vect', 'fyi_vect', 'gal_vect', 'gals_vect', 'game_vect', 'games_vect', 'gang_vect', 'gap_vect', 'gaps_vect', 'garage_vect', 'garbage_vect', 'gary_vect', 'gas_vect', 'gautham_vect', 'gave_vect', 'gay_vect', 'gd_vect', 'ge_vect', 'gee_vect', 'geeee_vect', 'geeeee_vect', 'gender_vect', 'generally_vect', 'genius_vect', 'gentle_vect', 'gentleman_vect', 'gently_vect', 'germany_vect', 'get_vect', 'gets_vect', 'gettin_vect', 'getting_vect', 'gf_vect', 'gibbs_vect', 'gift_vect', 'gim_vect', 'girl_vect', 'girls_vect', 'gist_vect', 'giv_vect', 'give_vect', 'given_vect', 'gives_vect', 'giving_vect', 'glad_vect', 'gn_vect', 'go_vect', 'goal_vect', 'god_vect', 'goes_vect', 'goin_vect', 'going_vect', 'gold_vect', 'gon_vect', 'gona_vect', 'gone_vect', 'good_vect', 'goodmorning_vect', 'goodnight_vect', 'goodnite_vect', 'google_vect', 'gorgeous_vect', 'gossip_vect', 'got_vect', 'goto_vect', 'gotten_vect', 'govtinstituitions_vect', 'gr8_vect', 'grace_vect', 'gram_vect', 'grand_vect', 'granite_vect', 'gravity_vect', 
'great_vect', 'green_vect', 'greet_vect', 'greetings_vect', 'grins_vect', 'grl_vect', 'ground_vect', 'group_vect', 'gt_vect', 'guaranteed_vect', 'gud_vect', 'gudnite_vect', 'guess_vect', 'guessing_vect', 'guide_vect', 'guilty_vect', 'guy_vect', 'guys_vect', 'gym_vect', 'ha_vect', 'haf_vect', 'haha_vect', 'hai_vect', 'hair_vect', 'haiz_vect', 'half_vect', 'halloween_vect', 'ham_vect', 'hand_vect', 'handed_vect', 'handle_vect', 'hands_vect', 'handset_vect', 'hanging_vect', 'happen_vect', 'happened_vect', 'happening_vect', 'happens_vect', 'happiness_vect', 'happy_vect', 'hard_vect', 'hardcore_vect', 'hardly_vect', 'harry_vect', 'hate_vect', 'hav_vect', 'havent_vect', 'havenåõt_vect', 'havin_vect', 'head_vect', 'headache_vect', 'headin_vect', 'heads_vect', 'hear_vect', 'heard_vect', 'heart_vect', 'heavy_vect', 'hee_vect', 'height_vect', 'held_vect', 'helen_vect', 'hell_vect', 'hella_vect', 'hello_vect', 'help_vect', 'help08712400602450p_vect', 'helpline_vect', 'hence_vect', 'henry_vect', 'heri_vect', 'herlove_vect', 'hes_vect', 'hex_vect', 'hey_vect', 'hgsuite3422lands_vect', 'hi_vect', 'hide_vect', 'high_vect', 'hill_vect', 'hint_vect', 'hip_vect', 'history_vect', 'hit_vect', 'hiya_vect', 'hl_vect', 'hlp_vect', 'hmm_vect', 'hmmm_vect', 'hmv_vect', 'ho_vect', 'hockey_vect', 'hol_vect', 'hold_vect', 'holder_vect', 'holding_vect', 'holiday_vect', 'holla_vect', 'hols_vect', 'holy_vect', 'home_vect', 'honey_vect', 'hook_vect', 'hop_vect', 'hope_vect', 'hoped_vect', 'hopefully_vect', 'hoping_vect', 'horny_vect', 'horo_vect', 'horrible_vect', 'hospital_vect', 'hospitals_vect', 'hostel_vect', 'hot_vect', 'hotel_vect', 'hotels_vect', 'hour_vect', 'hours_vect', 'house_vect', 'housemaid_vect', 'however_vect', 'hows_vect', 'howz_vect', 'hr_vect', 'hrishi_vect', 'hrs_vect', 'http_vect', 'hubby_vect', 'hug_vect', 'huh_vect', 'hun_vect', 'hungry_vect', 'hunny_vect', 'hurry_vect', 'hurt_vect', 'hurting_vect', 'hurts_vect', 'husband_vect', 'hv_vect', 'hw_vect', 'hyde_vect', 'iam_vect', 'ibhltd_vect', 'ibiza_vect', 'ic_vect', 'ice_vect', 'id_vect', 'idea_vect', 'ideal_vect', 'ideas_vect', 'identifier_vect', 'idiot_vect', 'idk_vect', 'ignore_vect', 'ikea_vect', 'il_vect', 'ill_vect', 'im_vect', 'imagine_vect', 'imma_vect', 'immediately_vect', 'imp_vect', 'important_vect', 'impossible_vect', 'improved_vect', 'in2_vect', 'inc_vect', 'inches_vect', 'incident_vect', 'include_vect', 'including_vect', 'inclusive_vect', 'inconsiderate_vect', 'indeed_vect', 'india_vect', 'indian_vect', 'indians_vect', 'indicate_vect', 'infections_vect', 'infernal_vect', 'info_vect', 'inform_vect', 'information_vect', 'informed_vect', 'innings_vect', 'insha_vect', 'inside_vect', 'instantly_vect', 'instead_vect', 'instructions_vect', 'insurance_vect', 'intelligent_vect', 'interest_vect', 'interested_vect', 'interesting_vect', 'interflora_vect', 'internet_vect', 'intro_vect', 'invest_vect', 'invite_vect', 'invited_vect', 'inviting_vect', 'iouri_vect', 'ip4_vect', 'ipod_vect', 'irritating_vect', 'iscoming_vect', 'ish_vect', 'island_vect', 'islands_vect', 'isnt_vect', 'issue_vect', 'issues_vect', 'it_vect', 'italian_vect', 'its_vect', 'itz_vect', 'itåõs_vect', 'ive_vect', 'iz_vect', 'izzit_vect', 'iåõm_vect', 'jacket_vect', 'jamster_vect', 'jan_vect', 'january_vect', 'jason_vect', 'java_vect', 'jay_vect', 'jazz_vect', 'jealous_vect', 'jeans_vect', 'jen_vect', 'jenny_vect', 'jess_vect', 'jesus_vect', 'jiayin_vect', 'jiu_vect', 'jo_vect', 'joanna_vect', 'job_vect', 'jogging_vect', 'john_vect', 'join_vect', 'joined_vect', 'joining_vect', 
'joke_vect', 'jokes_vect', 'jokin_vect', 'joking_vect', 'jolly_vect', 'joy_vect', 'jsco_vect', 'jst_vect', 'juan_vect', 'juicy_vect', 'july_vect', 'june_vect', 'jus_vect', 'juz_vect', 'k52_vect', 'kadeem_vect', 'kaiez_vect', 'kallis_vect', 'kano_vect', 'kappa_vect', 'karaoke_vect', 'kate_vect', 'kay_vect', 'kb_vect', 'ke_vect', 'keep_vect', 'keeping_vect', 'keeps_vect', 'kent_vect', 'kept_vect', 'kerala_vect', 'key_vect', 'keys_vect', 'ki_vect', 'kick_vect', 'kid_vect', 'kidding_vect', 'kids_vect', 'kidz_vect', 'kind_vect', 'kinda_vect', 'kindly_vect', 'king_vect', 'kiss_vect', 'kisses_vect', 'kk_vect', 'knackered_vect', 'knew_vect', 'knock_vect', 'know_vect', 'knowing_vect', 'knows_vect', 'knw_vect', 'kz_vect', 'l8r_vect', 'la_vect', 'lab_vect', 'ladies_vect', 'lady_vect', 'lag_vect', 'laid_vect', 'land_vect', 'landline_vect', 'langport_vect', 'language_vect', 'laptop_vect', 'lar_vect', 'largest_vect', 'last_vect', 'late_vect', 'later_vect', 'latest_vect', 'latr_vect', 'laugh_vect', 'laughing_vect', 'law_vect', 'lazy_vect', 'ldn_vect', 'ldnw15h_vect', 'le_vect', 'lead_vect', 'learn_vect', 'least_vect', 'leave_vect', 'leaves_vect', 'leaving_vect', 'lect_vect', 'lecture_vect', 'left_vect', 'legal_vect', 'legs_vect', 'leh_vect', 'lei_vect', 'lem_vect', 'less_vect', 'lesson_vect', 'lessons_vect', 'let_vect', 'lets_vect', 'letter_vect', 'letters_vect', 'liao_vect', 'library_vect', 'lick_vect', 'licks_vect', 'lido_vect', 'lie_vect', 'lies_vect', 'life_vect', 'lifetime_vect', 'lift_vect', 'light_vect', 'lik_vect', 'like_vect', 'liked_vect', 'likely_vect', 'likes_vect', 'lil_vect', 'line_vect', 'linerental_vect', 'lines_vect', 'link_vect', 'lion_vect', 'lionm_vect', 'lionp_vect', 'lions_vect', 'lip_vect', 'list_vect', 'listen_vect', 'listening_vect', 'literally_vect', 'little_vect', 'live_vect', 'liverpool_vect', 'living_vect', 'lk_vect', 'll_vect', 'lmao_vect', 'lo_vect', 'loads_vect', 'loan_vect', 'loans_vect', 'local_vect', 'locations_vect', 'lock_vect', 'log_vect', 'login_vect', 'logo_vect', 'logopic_vect', 'lol_vect', 'london_vect', 'lonely_vect', 'long_vect', 'longer_vect', 'look_vect', 'lookatme_vect', 'looked_vect', 'lookin_vect', 'looking_vect', 'looks_vect', 'lor_vect', 'lose_vect', 'losing_vect', 'loss_vect', 'lost_vect', 'lot_vect', 'lotr_vect', 'lots_vect', 'lou_vect', 'loud_vect', 'lounge_vect', 'lousy_vect', 'lovable_vect', 'love_vect', 'loved_vect', 'lovely_vect', 'loveme_vect', 'lover_vect', 'loverboy_vect', 'lovers_vect', 'loves_vect', 'loving_vect', 'low_vect', 'lower_vect', 'loyal_vect', 'loyalty_vect', 'ls15hb_vect', 'lt_vect', 'ltd_vect', 'luck_vect', 'lucky_vect', 'lucy_vect', 'lunch_vect', 'luv_vect', 'lux_vect', 'luxury_vect', 'lyf_vect', 'lyfu_vect', 'lyk_vect', 'm227xy_vect', 'm26_vect', 'm263uz_vect', 'm8s_vect', 'mac_vect', 'machan_vect', 'macho_vect', 'mad_vect', 'madam_vect', 'made_vect', 'mag_vect', 'maga_vect', 'magical_vect', 'mah_vect', 'mail_vect', 'mailbox_vect', 'main_vect', 'maintain_vect', 'major_vect', 'make_vect', 'makes_vect', 'makin_vect', 'making_vect', 'malaria_vect', 'male_vect', 'mall_vect', 'man_vect', 'managed_vect', 'management_vect', 'many_vect', 'map_vect', 'march_vect', 'mark_vect', 'market_vect', 'marriage_vect', 'married_vect', 'marry_vect', 'masters_vect', 'match_vect', 'matches_vect', 'mate_vect', 'mates_vect', 'matrix3_vect', 'matter_vect', 'max10mins_vect', 'maximize_vect', 'maxå_vect', 'may_vect', 'mayb_vect', 'maybe_vect', 'mca_vect', 'mcat_vect', 'meal_vect', 'mean_vect', 'meaning_vect', 'means_vect', 'meant_vect', 'meanwhile_vect', 
'med_vect', 'medical_vect', 'medicine_vect', 'meds_vect', 'meet_vect', 'meetin_vect', 'meeting_vect', 'meh_vect', 'mei_vect', 'member_vect', 'members_vect', 'men_vect', 'menu_vect', 'merry_vect', 'mess_vect', 'message_vect', 'messaged_vect', 'messages_vect', 'messaging_vect', 'met_vect', 'mid_vect', 'middle_vect', 'midnight_vect', 'mids_vect', 'might_vect', 'miles_vect', 'milk_vect', 'min_vect', 'mind_vect', 'mine_vect', 'mini_vect', 'minimum_vect', 'minor_vect', 'mins_vect', 'minute_vect', 'minutes_vect', 'minuts_vect', 'miracle_vect', 'mis_vect', 'miserable_vect', 'miss_vect', 'missed_vect', 'missin_vect', 'missing_vect', 'mistake_vect', 'mistakes_vect', 'mite_vect', 'mm_vect', 'mmm_vect', 'mmmm_vect', 'mmmmmm_vect', 'mnths_vect', 'mo_vect', 'moan_vect', 'mob_vect', 'mobile_vect', 'mobiles_vect', 'mobilesdirect_vect', 'mobilesvary_vect', 'mobileupd8_vect', 'mobno_vect', 'moby_vect', 'mode_vect', 'model_vect', 'module_vect', 'modules_vect', 'moji_vect', 'mojibiola_vect', 'mokka_vect', 'mom_vect', 'moment_vect', 'moments_vect', 'moms_vect', 'mon_vect', 'monday_vect', 'money_vect', 'monkeys_vect', 'mono_vect', 'month_vect', 'monthly_vect', 'months_vect', 'mood_vect', 'moon_vect', 'moral_vect', 'morn_vect', 'morning_vect', 'mostly_vect', 'mother_vect', 'motorola_vect', 'mouth_vect', 'move_vect', 'moved_vect', 'movie_vect', 'movies_vect', 'moving_vect', 'mp3_vect', 'mr_vect', 'mrng_vect', 'mrt_vect', 'msg_vect', 'msgs_vect', 'mths_vect', 'mu_vect', 'much_vect', 'mum_vect', 'mummy_vect', 'murder_vect', 'murdered_vect', 'murderer_vect', 'music_vect', 'must_vect', 'muz_vect', 'na_vect', 'nag_vect', 'nah_vect', 'naked_vect', 'name_vect', 'name1_vect', 'name2_vect', 'named_vect', 'names_vect', 'nan_vect', 'nap_vect', 'nasdaq_vect', 'nat_vect', 'national_vect', 'natural_vect', 'nature_vect', 'naughty_vect', 'nb_vect', 'nd_vect', 'ne_vect', 'near_vect', 'nearly_vect', 'necessarily_vect', 'necklace_vect', 'ned_vect', 'need_vect', 'needed_vect', 'needs_vect', 'neither_vect', 'net_vect', 'netcollex_vect', 'network_vect', 'networks_vect', 'neva_vect', 'never_vect', 'new_vect', 'newest_vect', 'news_vect', 'next_vect', 'ni8_vect', 'nice_vect', 'nigeria_vect', 'night_vect', 'nights_vect', 'nimya_vect', 'nite_vect', 'no1_vect', 'nobody_vect', 'noe_vect', 'nokia_vect', 'nokias_vect', 'noline_vect', 'none_vect', 'noon_vect', 'nope_vect', 'norm_vect', 'norm150ptone_vect', 'normal_vect', 'normally_vect', 'northampton_vect', 'note_vect', 'nothin_vect', 'nothing_vect', 'notice_vect', 'noun_vect', 'nowadays_vect', 'nt_vect', 'ntt_vect', 'ntwk_vect', 'num_vect', 'number_vect', 'numbers_vect', 'nuther_vect', 'nvm_vect', 'nw_vect', 'nxt_vect', 'nyc_vect', 'nydc_vect', 'nyt_vect', 'o2_vect', 'obviously_vect', 'odi_vect', 'offer_vect', 'offers_vect', 'office_vect', 'official_vect', 'officially_vect', 'ofice_vect', 'often_vect', 'oh_vect', 'oi_vect', 'oic_vect', 'ok_vect', 'okay_vect', 'okey_vect', 'okie_vect', 'ola_vect', 'old_vect', 'omg_vect', 'omw_vect', 'one_vect', 'ones_vect', 'oni_vect', 'online_vect', 'onto_vect', 'onwards_vect', 'oooh_vect', 'oops_vect', 'open_vect', 'opening_vect', 'operator_vect', 'opinion_vect', 'opportunity_vect', 'opt_vect', 'option_vect', 'optout_vect', 'or2stoptxt_vect', 'orange_vect', 'oranges_vect', 'orchard_vect', 'order_vect', 'ordered_vect', 'oredi_vect', 'original_vect', 'oru_vect', 'os_vect', 'oso_vect', 'others_vect', 'otherwise_vect', 'otside_vect', 'outside_vect', 'outstanding_vect', 'outta_vect', 'ovulation_vect', 'oz_vect', 'pa_vect', 'pack_vect', 'package_vect', 'page_vect', 
'pages_vect', 'paid_vect', 'pain_vect', 'painful_vect', 'painting_vect', 'panic_vect', 'paper_vect', 'papers_vect', 'paperwork_vect', 'parco_vect', 'parent_vect', 'parents_vect', 'paris_vect', 'park_vect', 'parked_vect', 'parking_vect', 'part_vect', 'partner_vect', 'partnership_vect', 'party_vect', 'pass_vect', 'passed_vect', 'password_vect', 'past_vect', 'pattern_vect', 'patty_vect', 'pay_vect', 'paying_vect', 'payment_vect', 'payoh_vect', 'pc_vect', 'peace_vect', 'pence_vect', 'people_vect', 'per_vect', 'perfect_vect', 'period_vect', 'person_vect', 'personal_vect', 'pete_vect', 'petey_vect', 'pg_vect', 'philosophy_vect', 'phoenix_vect', 'phone_vect', 'phones_vect', 'photo_vect', 'photos_vect', 'pic_vect', 'pick_vect', 'picked_vect', 'picking_vect', 'pics_vect', 'picsfree1_vect', 'picture_vect', 'pictures_vect', 'pie_vect', 'pieces_vect', 'pig_vect', 'pilates_vect', 'pin_vect', 'pink_vect', 'piss_vect', 'pissed_vect', 'pix_vect', 'pizza_vect', 'place_vect', 'places_vect', 'plan_vect', 'planned_vect', 'planning_vect', 'plans_vect', 'play_vect', 'played_vect', 'player_vect', 'players_vect', 'playing_vect', 'please_vect', 'pleased_vect', 'pleasure_vect', 'plenty_vect', 'pls_vect', 'plus_vect', 'plz_vect', 'pm_vect', 'po_vect', 'pobox_vect', 'pobox334_vect', 'pobox36504w45wq_vect', 'pobox45w2tg150p_vect', 'pobox84_vect', 'pod_vect', 'point_vect', 'points_vect', 'poker_vect', 'pole_vect', 'police_vect', 'politicians_vect', 'poly_vect', 'polyphonic_vect', 'polys_vect', 'pongal_vect', 'poor_vect', 'pop_vect', 'popped_vect', 'porn_vect', 'possession_vect', 'possible_vect', 'post_vect', 'postcard_vect', 'postcode_vect', 'posted_vect', 'posts_vect', 'potential_vect', 'potter_vect', 'pound_vect', 'pounds_vect', 'pouts_vect', 'power_vect', 'ppl_vect', 'pple_vect', 'ppm_vect', 'prabha_vect', 'practice_vect', 'practicing_vect', 'pray_vect', 'prefer_vect', 'premier_vect', 'prepare_vect', 'prescription_vect', 'present_vect', 'press_vect', 'pretty_vect', 'prey_vect', 'price_vect', 'prince_vect', 'princess_vect', 'print_vect', 'privacy_vect', 'private_vect', 'prize_vect', 'prob_vect', 'probably_vect', 'problem_vect', 'problems_vect', 'process_vect', 'processed_vect', 'prof_vect', 'profit_vect', 'program_vect', 'project_vect', 'prolly_vect', 'promise_vect', 'promises_vect', 'promo_vect', 'proof_vect', 'properly_vect', 'prospects_vect', 'provided_vect', 'ps_vect', 'ptbo_vect', 'pub_vect', 'pull_vect', 'purchase_vect', 'purity_vect', 'purpose_vect', 'push_vect', 'pushes_vect', 'pussy_vect', 'put_vect', 'puttin_vect', 'putting_vect', 'qatar_vect', 'quality_vect', 'queen_vect', 'ques_vect', 'question_vect', 'questions_vect', 'quick_vect', 'quickly_vect', 'quiet_vect', 'quit_vect', 'quite_vect', 'quiz_vect', 'quote_vect', 'quoting_vect', 'racing_vect', 'radio_vect', 'railway_vect', 'rain_vect', 'raining_vect', 'raise_vect', 'rakhesh_vect', 'rally_vect', 'ran_vect', 'random_vect', 'randomly_vect', 'randy_vect', 'rang_vect', 'range_vect', 'ranjith_vect', 'rate_vect', 'rates_vect', 'rather_vect', 'rays_vect', 'rcvd_vect', 'rd_vect', 're_vect', 'reach_vect', 'reached_vect', 'reaching_vect', 'reaction_vect', 'read_vect', 'readers_vect', 'reading_vect', 'ready_vect', 'real_vect', 'realise_vect', 'reality_vect', 'realized_vect', 'really_vect', 'realy_vect', 'reason_vect', 'reasonable_vect', 'reasons_vect', 'reboot_vect', 'recd_vect', 'receipt_vect', 'receive_vect', 'received_vect', 'receiving_vect', 'recent_vect', 'recently_vect', 'recession_vect', 'record_vect', 'records_vect', 'recovery_vect', 'red_vect', 'ref_vect', 
'reference_vect', 'reg_vect', 'regards_vect', 'register_vect', 'registered_vect', 'regret_vect', 'regular_vect', 'relation_vect', 'relax_vect', 'released_vect', 'rem_vect', 'remain_vect', 'remains_vect', 'remember_vect', 'remembered_vect', 'remembr_vect', 'remind_vect', 'reminder_vect', 'remove_vect', 'rent_vect', 'rental_vect', 'rentl_vect', 'repair_vect', 'repeat_vect', 'replied_vect', 'reply_vect', 'replying_vect', 'report_vect', 'representative_vect', 'request_vect', 'requests_vect', 'research_vect', 'resend_vect', 'respect_vect', 'respond_vect', 'responding_vect', 'response_vect', 'responsibility_vect', 'rest_vect', 'restaurant_vect', 'result_vect', 'results_vect', 'retrieve_vect', 'return_vect', 'returned_vect', 'returns_vect', 'reveal_vect', 'revealed_vect', 'review_vect', 'revision_vect', 'reward_vect', 'rhythm_vect', 'rice_vect', 'rich_vect', 'ride_vect', 'right_vect', 'rights_vect', 'ring_vect', 'ringtone_vect', 'ringtones_vect', 'rite_vect', 'river_vect', 'road_vect', 'roads_vect', 'roast_vect', 'rock_vect', 'rocks_vect', 'rofl_vect', 'roger_vect', 'role_vect', 'ron_vect', 'room_vect', 'roommate_vect', 'roommates_vect', 'rooms_vect', 'rose_vect', 'round_vect', 'row_vect', 'roww1j6hl_vect', 'roww1jhl_vect', 'royal_vect', 'rply_vect', 'rs_vect', 'rstm_vect', 'ru_vect', 'rub_vect', 'rude_vect', 'rule_vect', 'run_vect', 'running_vect', 'runs_vect', 'rush_vect', 'sacrifice_vect', 'sad_vect', 'sae_vect', 'safe_vect', 'said_vect', 'salam_vect', 'salary_vect', 'sale_vect', 'salon_vect', 'sam_vect', 'santa_vect', 'sar_vect', 'sarasota_vect', 'sarcastic_vect', 'sary_vect', 'sat_vect', 'sathya_vect', 'saturday_vect', 'savamob_vect', 'save_vect', 'saved_vect', 'saw_vect', 'say_vect', 'saying_vect', 'says_vect', 'scared_vect', 'scary_vect', 'sch_vect', 'schedule_vect', 'school_vect', 'schools_vect', 'science_vect', 'scold_vect', 'score_vect', 'scores_vect', 'scotland_vect', 'scream_vect', 'screaming_vect', 'scrounge_vect', 'se_vect', 'sea_vect', 'search_vect', 'searching_vect', 'season_vect', 'seat_vect', 'sec_vect', 'second_vect', 'seconds_vect', 'secret_vect', 'secretary_vect', 'secs_vect', 'sed_vect', 'see_vect', 'seeing_vect', 'seem_vect', 'seemed_vect', 'seems_vect', 'seen_vect', 'selected_vect', 'selection_vect', 'self_vect', 'sell_vect', 'selling_vect', 'sells_vect', 'sem_vect', 'semester_vect', 'sen_vect', 'send_vect', 'sender_vect', 'sending_vect', 'sense_vect', 'sent_vect', 'sentence_vect', 'sept_vect', 'series_vect', 'serious_vect', 'seriously_vect', 'service_vect', 'services_vect', 'serving_vect', 'set_vect', 'setting_vect', 'settings_vect', 'settle_vect', 'settled_vect', 'seven_vect', 'several_vect', 'sex_vect', 'sexy_vect', 'sh_vect', 'sha_vect', 'shall_vect', 'share_vect', 'shd_vect', 'sheets_vect', 'shes_vect', 'shesil_vect', 'shining_vect', 'ship_vect', 'shipping_vect', 'shirt_vect', 'shirts_vect', 'shit_vect', 'shld_vect', 'shocking_vect', 'shoot_vect', 'shop_vect', 'shoppin_vect', 'shopping_vect', 'short_vect', 'shorter_vect', 'shortly_vect', 'shot_vect', 'shoving_vect', 'show_vect', 'shower_vect', 'showing_vect', 'shows_vect', 'shu_vect', 'shuhui_vect', 'shy_vect', 'si_vect', 'sick_vect', 'side_vect', 'sighs_vect', 'sight_vect', 'sign_vect', 'signing_vect', 'silence_vect', 'silent_vect', 'silver_vect', 'sim_vect', 'simple_vect', 'simply_vect', 'since_vect', 'sing_vect', 'singing_vect', 'single_vect', 'singles_vect', 'sipix_vect', 'sir_vect', 'sis_vect', 'sister_vect', 'sit_vect', 'site_vect', 'sitting_vect', 'situation_vect', 'siva_vect', 'six_vect', 'size_vect', 
'sk3_vect', 'sk38xh_vect', 'skilgme_vect', 'skip_vect', 'sky_vect', 'skype_vect', 'skyped_vect', 'slap_vect', 'slave_vect', 'sleep_vect', 'sleepin_vect', 'sleeping_vect', 'sleepy_vect', 'slept_vect', 'slice_vect', 'slide_vect', 'slightly_vect', 'slip_vect', 'slippers_vect', 'slo_vect', 'slots_vect', 'slow_vect', 'slowly_vect', 'small_vect', 'smashed_vect', 'smile_vect', 'smiles_vect', 'smiling_vect', 'smoke_vect', 'smoking_vect', 'sms_vect', 'smth_vect', 'sn_vect', 'snake_vect', 'snow_vect', 'social_vect', 'sofa_vect', 'soft_vect', 'software_vect', 'sol_vect', 'some1_vect', 'somebody_vect', 'someone_vect', 'somethin_vect', 'something_vect', 'sometimes_vect', 'somewhere_vect', 'song_vect', 'songs_vect', 'sony_vect', 'sonyericsson_vect', 'soon_vect', 'sooner_vect', 'sore_vect', 'sorry_vect', 'sort_vect', 'sorting_vect', 'sound_vect', 'sounds_vect', 'south_vect', 'sp_vect', 'space_vect', 'spanish_vect', 'speak_vect', 'speaking_vect', 'special_vect', 'specialcall_vect', 'specially_vect', 'speed_vect', 'spend_vect', 'spending_vect', 'spent_vect', 'spk_vect', 'spoke_vect', 'spoken_vect', 'spook_vect', 'sport_vect', 'sports_vect', 'spree_vect', 'spring_vect', 'sptv_vect', 'sry_vect', 'st_vect', 'staff_vect', 'stamps_vect', 'stand_vect', 'standard_vect', 'standing_vect', 'star_vect', 'staring_vect', 'start_vect', 'started_vect', 'starting_vect', 'starts_vect', 'starwars3_vect', 'statement_vect', 'station_vect', 'stay_vect', 'stayed_vect', 'staying_vect', 'std_vect', 'steam_vect', 'step_vect', 'steve_vect', 'stick_vect', 'sticky_vect', 'still_vect', 'stock_vect', 'stockport_vect', 'stomach_vect', 'stomps_vect', 'stones_vect', 'stop_vect', 'stopped_vect', 'stops_vect', 'store_vect', 'stores_vect', 'story_vect', 'str_vect', 'straight_vect', 'stranger_vect', 'street_vect', 'stress_vect', 'strike_vect', 'strong_vect', 'strongbuy_vect', 'stuck_vect', 'student_vect', 'study_vect', 'studying_vect', 'stuff_vect', 'stupid_vect', 'style_vect', 'stylish_vect', 'sub_vect', 'subpoly_vect', 'subs_vect', 'subscribe6gbpmnth_vect', 'subscribed_vect', 'subscriber_vect', 'subscription_vect', 'success_vect', 'successful_vect', 'successfully_vect', 'sucks_vect', 'sue_vect', 'sufficient_vect', 'suggest_vect', 'suite_vect', 'suits_vect', 'sum1_vect', 'summer_vect', 'sun_vect', 'sunday_vect', 'sunlight_vect', 'sunny_vect', 'sunshine_vect', 'suntec_vect', 'sup_vect', 'super_vect', 'superb_vect', 'superior_vect', 'supervisor_vect', 'supply_vect', 'support_vect', 'suppose_vect', 'supposed_vect', 'suprman_vect', 'sura_vect', 'sure_vect', 'surely_vect', 'surfing_vect', 'surprise_vect', 'surprised_vect', 'survey_vect', 'sux_vect', 'suzy_vect', 'sw7_vect', 'sw73ss_vect', 'sweet_vect', 'swing_vect', 'system_vect', 'ta_vect', 'tablets_vect', 'tahan_vect', 'take_vect', 'taken_vect', 'takes_vect', 'takin_vect', 'taking_vect', 'talent_vect', 'talk_vect', 'talking_vect', 'tampa_vect', 'tape_vect', 'tariffs_vect', 'tat_vect', 'taunton_vect', 'taylor_vect', 'tb_vect', 'tc_vect', 'tcrw1_vect', 'tcs_vect', 'tea_vect', 'teach_vect', 'teacher_vect', 'teaches_vect', 'team_vect', 'tear_vect', 'tease_vect', 'teasing_vect', 'tech_vect', 'technical_vect', 'tee_vect', 'teeth_vect', 'tel_vect', 'telephone_vect', 'tell_vect', 'telling_vect', 'tells_vect', 'telugu_vect', 'temple_vect', 'ten_vect', 'tenants_vect', 'tenerife_vect', 'tension_vect', 'term_vect', 'terms_vect', 'terrible_vect', 'test_vect', 'testing_vect', 'text_vect', 'texted_vect', 'texting_vect', 'textoperator_vect', 'texts_vect', 'th_vect', 'thangam_vect', 'thank_vect', 'thanks_vect', 
'thanksgiving_vect', 'thanx_vect', 'that_vect', 'thats_vect', 'thatåõs_vect', 'the_vect', 'theatre_vect', 'themob_vect', 'theory_vect', 'thesis_vect', 'thgt_vect', 'thing_vect', 'things_vect', 'think_vect', 'thinkin_vect', 'thinking_vect', 'thinks_vect', 'thk_vect', 'thnk_vect', 'tho_vect', 'though_vect', 'thought_vect', 'three_vect', 'throat_vect', 'throw_vect', 'thru_vect', 'tht_vect', 'thts_vect', 'thurs_vect', 'thursday_vect', 'tick_vect', 'ticket_vect', 'tickets_vect', 'tight_vect', 'tihs_vect', 'til_vect', 'till_vect', 'time_vect', 'times_vect', 'timing_vect', 'tired_vect', 'tirunelvali_vect', 'tirupur_vect', 'tissco_vect', 'tkts_vect', 'tm_vect', 'tming_vect', 'tmobile_vect', 'tmr_vect', 'tncs_vect', 'toa_vect', 'toclaim_vect', 'today_vect', 'todays_vect', 'tog_vect', 'together_vect', 'tok_vect', 'told_vect', 'tomarrow_vect', 'tomo_vect', 'tomorrow_vect', 'tone_vect', 'tones_vect', 'tones2youcouk_vect', 'tonight_vect', 'tonite_vect', 'took_vect', 'tool_vect', 'tooo_vect', 'toot_vect', 'top_vect', 'topic_vect', 'torch_vect', 'toshiba_vect', 'tot_vect', 'total_vect', 'totally_vect', 'touch_vect', 'tough_vect', 'tour_vect', 'towards_vect', 'town_vect', 'track_vect', 'trade_vect', 'traffic_vect', 'train_vect', 'training_vect', 'transaction_vect', 'transfer_vect', 'transport_vect', 'travel_vect', 'treat_vect', 'treated_vect', 'tried_vect', 'trip_vect', 'trips_vect', 'trouble_vect', 'true_vect', 'truffles_vect', 'truly_vect', 'trust_vect', 'truth_vect', 'try_vect', 'trying_vect', 'ts_vect', 'tscs_vect', 'tscs087147403231winawk_vect', 'tt_vect', 'ttyl_vect', 'tues_vect', 'tuesday_vect', 'tuition_vect', 'turn_vect', 'turning_vect', 'turns_vect', 'tv_vect', 'twelve_vect', 'twice_vect', 'two_vect', 'txt_vect', 'txtauction_vect', 'txtin_vect', 'txting_vect', 'txtno_vect', 'txts_vect', 'txtstop_vect', 'tyler_vect', 'type_vect', 'tyrone_vect', 'u4_vect', 'ubi_vect', 'ufind_vect', 'ugh_vect', 'uh_vect', 'uk_vect', 'uks_vect', 'ultimatum_vect', 'umma_vect', 'unable_vect', 'uncle_vect', 'understand_vect', 'understanding_vect', 'understood_vect', 'underwear_vect', 'unemployed_vect', 'uni_vect', 'unique_vect', 'university_vect', 'unless_vect', 'unlimited_vect', 'unnecessarily_vect', 'unredeemed_vect', 'unsold_vect', 'unsub_vect', 'unsubscribe_vect', 'upd8_vect', 'update_vect', 'updatenow_vect', 'upgrade_vect', 'upload_vect', 'upset_vect', 'upstairs_vect', 'ur_vect', 'ure_vect', 'urgent_vect', 'urgnt_vect', 'url_vect', 'urn_vect', 'urself_vect', 'us_vect', 'usb_vect', 'use_vect', 'used_vect', 'user_vect', 'usf_vect', 'using_vect', 'usual_vect', 'usually_vect', 'vale_vect', 'valentine_vect', 'valentines_vect', 'valid_vect', 'valid12hrs_vect', 'valuable_vect', 'value_vect', 'valued_vect', 'vary_vect', 've_vect', 'vegas_vect', 'verify_vect', 'version_vect', 'via_vect', 'vid_vect', 'video_vect', 'videochat_vect', 'videophones_vect', 'vijay_vect', 'vikky_vect', 'village_vect', 'violated_vect', 'violence_vect', 'vip_vect', 'virgin_vect', 'visit_vect', 'vivek_vect', 'vl_vect', 'voda_vect', 'vodafone_vect', 'vodka_vect', 'voice_vect', 'voicemail_vect', 'vomit_vect', 'vote_vect', 'voucher_vect', 'vouchers_vect', 'vry_vect', 'vth_vect', 'w45wq_vect', 'wa_vect', 'wah_vect', 'wait_vect', 'waited_vect', 'waitin_vect', 'waiting_vect', 'wake_vect', 'waking_vect', 'wales_vect', 'walk_vect', 'walked_vect', 'walking_vect', 'walmart_vect', 'wan_vect', 'wana_vect', 'want_vect', 'wanted_vect', 'wanting_vect', 'wants_vect', 'wap_vect', 'warm_vect', 'warner_vect', 'waste_vect', 'wasted_vect', 'wat_vect', 'watch_vect', 
'watching_vect', 'water_vect', 'wats_vect', 'way_vect', 'wc1n3xx_vect', 'we_vect', 'weak_vect', 'wear_vect', 'wearing_vect', 'weather_vect', 'web_vect', 'website_vect', 'wed_vect', 'wedding_vect', 'wednesday_vect', 'wee_vect', 'weed_vect', 'week_vect', 'weekend_vect', 'weekends_vect', 'weekly_vect', 'weeks_vect', 'weigh_vect', 'weight_vect', 'weird_vect', 'welcome_vect', 'well_vect', 'welp_vect', 'wen_vect', 'went_vect', 'west_vect', 'wet_vect', 'what_vect', 'whatever_vect', 'whats_vect', 'whenever_vect', 'whenevr_vect', 'wherever_vect', 'whether_vect', 'white_vect', 'whn_vect', 'whole_vect', 'whos_vect', 'whose_vect', 'wid_vect', 'widelivecomindex_vect', 'wif_vect', 'wife_vect', 'wil_vect', 'willing_vect', 'win_vect', 'wind_vect', 'wine_vect', 'winner_vect', 'winning_vect', 'wins_vect', 'wipro_vect', 'wisdom_vect', 'wise_vect', 'wish_vect', 'wishes_vect', 'wishing_vect', 'wit_vect', 'within_vect', 'without_vect', 'wiv_vect', 'wk_vect', 'wkend_vect', 'wkg_vect', 'wkly_vect', 'wks_vect', 'wld_vect', 'wml_vect', 'wn_vect', 'wnt_vect', 'wo_vect', 'woke_vect', 'woken_vect', 'woman_vect', 'women_vect', 'wonder_vect', 'wonderful_vect', 'wondering_vect', 'wont_vect', 'woot_vect', 'word_vect', 'words_vect', 'work_vect', 'workin_vect', 'working_vect', 'works_vect', 'world_vect', 'worried_vect', 'worries_vect', 'worry_vect', 'worse_vect', 'worst_vect', 'worth_vect', 'wot_vect', 'would_vect', 'wow_vect', 'write_vect', 'wrong_vect', 'wtf_vect', 'wud_vect', 'wuld_vect', 'wun_vect', 'www4tcbiz_vect', 'wwwcomuknet_vect', 'wwwetlpcoukexpressoffer_vect', 'wwwgetzedcouk_vect', 'wwwldewcom_vect', 'wwwldewcom1win150ppmx3age16_vect', 'wwwmovietriviatv_vect', 'wwwringtonescouk_vect', 'wwwsmsconet_vect', 'wwwtxttowincouk_vect', 'wwwurawinnercom_vect', 'wylie_vect', 'xchat_vect', 'xmas_vect', 'xuhui_vect', 'xx_vect', 'xxx_vect', 'xxxx_vect', 'xxxxx_vect', 'xy_vect', 'ya_vect', 'yahoo_vect', 'yan_vect', 'yar_vect', 'yay_vect', 'yck_vect', 'yeah_vect', 'year_vect', 'years_vect', 'yelling_vect', 'yellow_vect', 'yep_vect', 'yes_vect', 'yest_vect', 'yesterday_vect', 'yet_vect', 'yetunde_vect', 'yijue_vect', 'ym_vect', 'yo_vect', 'yoga_vect', 'yogasana_vect', 'yor_vect', 'you_vect', 'yr_vect', 'yrs_vect', 'yummy_vect', 'yun_vect', 'yuo_vect', 'yup_vect', 'zed_vect', 'zindgi_vect', 'ìï_vect', 'ûò_vect']}, 'training': {'algo': 'Logistic Regression', 'model_file': 'AI0110_1.sav'}} deployer = get_deployer('classification',params=config) deployer.run( )
output_formatter.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import platform import json import shutil import logging class outputFormatter: def __init__(self): self.log = logging.getLogger('eion') self.log.info('========> Inside Output Formatter') def crate_output_format_file(self,deploy_path,learner_type,modelType,model,output_label,threshold,trained_data_file,dictDiffCount,targetFeature,features,datetimeFeature): self.output_formatfile = 'import json' self.output_formatfile += '\n' self.output_formatfile += 'import numpy as np' self.output_formatfile += '\n' self.output_formatfile += 'import pandas as pd' self.output_formatfile += '\n' self.output_formatfile += 'import os' self.output_formatfile += '\n' self.output_formatfile += 'from pathlib import Path' self.output_formatfile += '\n' if((model.lower() in ['autoencoder','dbscan']) and modelType.lower()=="anomaly_detection"): self.output_formatfile += 'from script.aion_granularity import aion_gettimegranularity' self.output_formatfile += '\n' self.output_formatfile += 'class output_format(object):' self.output_formatfile += '\n' if(model == 'VAR'): self.output_formatfile += ' def invertTransformation(self,predictions):' self.output_formatfile += '\n' self.output_formatfile += ' datasetdf = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)),"..","data","trainingdata.csv"))' self.output_formatfile += '\n' self.output_formatfile += ' dictDiffCount = '+str(dictDiffCount) self.output_formatfile += '\n' self.output_formatfile += ' targetFeature = "'+str(targetFeature)+'"' self.output_formatfile += '\n' self.output_formatfile += ' columns = targetFeature.split(",")' self.output_formatfile += '\n' self.output_formatfile += ' pred = pd.DataFrame(index=range(0,len(predictions)),columns=columns)' self.output_formatfile += '\n' self.output_formatfile += ' for j in range(0,len(columns)):' self.output_formatfile += '\n' self.output_formatfile += ' for i in range(0, len(predictions)):' self.output_formatfile += '\n' self.output_formatfile += ' pred.iloc[i][j] = round(predictions[i][j],2)' self.output_formatfile += '\n' self.output_formatfile += ' prediction = pred' self.output_formatfile += '\n' self.output_formatfile += ' for col in columns:' self.output_formatfile += '\n' self.output_formatfile += ' if col in dictDiffCount:' self.output_formatfile += '\n' self.output_formatfile += ' if dictDiffCount[col]==2:' self.output_formatfile += '\n' self.output_formatfile += ' prediction[col] = (datasetdf[col].iloc[-1]-datasetdf[col].iloc[-2]) + prediction[col].cumsum()' self.output_formatfile += '\n' self.output_formatfile += ' prediction[col] = datasetdf[col].iloc[-1] + prediction[col].cumsum()' self.output_formatfile += '\n' self.output_formatfile += ' prediction = pred' self.output_formatfile += '\n' self.output_formatfile += ' return(prediction)' self.output_formatfile += '\n' self.log.info("op:modelType: \n"+str(modelType)) if((model.lower() in ['autoencoder','dbscan']) and modelType.lower()=="anomaly_detection"): # if modelType == 'anomaly_detection': 
self.output_formatfile += ' def find_point_subsequence_anomalies(self,datetime_column,dataframe=None):' self.output_formatfile += '\n' self.output_formatfile += ' try:' self.output_formatfile += '\n' self.output_formatfile += ' dataframe[datetime_column] = pd.to_datetime(dataframe[datetime_column]) ' self.output_formatfile += '\n' self.output_formatfile += ' aion_gettimegranularity_obj=aion_gettimegranularity(dataframe,datetime_column) ' self.output_formatfile += '\n' self.output_formatfile += ' anomaly_info_df=aion_gettimegranularity_obj.get_granularity() ' self.output_formatfile += '\n' self.output_formatfile += ' except Exception as e:' self.output_formatfile += '\n' self.output_formatfile += ' print(f"find_point_subsequence_anomalies,: aion_gettimegranularity err msg:{e} ")\n' self.output_formatfile += ' return anomaly_info_df' self.output_formatfile += '\n' if((model.lower() in ['autoencoder','dbscan']) and modelType.lower()=="anomaly_detection"): if (datetimeFeature!='' and datetimeFeature!='NA'): self.output_formatfile += ' def apply_output_format(self,df,modeloutput,datetimeFeature):' self.output_formatfile += '\n' else: self.output_formatfile += ' def apply_output_format(self,df,modeloutput):' self.output_formatfile += '\n' else: self.output_formatfile += ' def apply_output_format(self,df,modeloutput):' self.output_formatfile += '\n' if modelType.lower() == 'classification': self.output_formatfile += ' modeloutput = round(modeloutput,2)' self.output_formatfile += '\n' if(learner_type == 'ImageClassification'): if(str(output_label) != '{}'): inv_mapping_dict = {v: k for k, v in output_label.items()} self.output_formatfile += ' le_dict = '+ str(inv_mapping_dict) self.output_formatfile += '\n' self.output_formatfile += ' predictions = []' self.output_formatfile += '\n' self.output_formatfile += ' for x in modeloutput:' self.output_formatfile += '\n' self.output_formatfile += ' x = le_dict[x]' self.output_formatfile += '\n' self.output_formatfile += ' predictions.append(x)' self.output_formatfile += '\n' else: self.output_formatfile += ' predictions=modeloutput' self.output_formatfile += '\n' self.output_formatfile += ' df[\'prediction\'] = predictions' self.output_formatfile += '\n' self.output_formatfile += ' outputjson = df.to_json(orient=\'records\')' self.output_formatfile += '\n' self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}' self.output_formatfile += '\n' elif(learner_type == 'Text Similarity'): self.output_formatfile += ' df[\'prediction\'] = np.where(modeloutput > '+str(threshold)+',1,0)' self.output_formatfile += '\n' self.output_formatfile += ' df[\'probability\'] = modeloutput' self.output_formatfile += '\n' self.output_formatfile += ' outputjson = df.to_json(orient=\'records\',double_precision=2)' self.output_formatfile += '\n' self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}' self.output_formatfile += '\n' elif(learner_type == 'TS'): if(model == 'VAR'): self.output_formatfile += ' modeloutput = self.invertTransformation(modeloutput)' self.output_formatfile += '\n' self.output_formatfile += ' modeloutput = modeloutput.to_json(orient=\'records\',double_precision=2)' self.output_formatfile += '\n' self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(modeloutput)}' elif(model.lower() == 'fbprophet'): self.output_formatfile += ' modeloutput = modeloutput.to_json(orient=\'records\')' self.output_formatfile += '\n' self.output_formatfile += ' outputjson = 
{"status":"SUCCESS","data":json.loads(modeloutput)}' elif((model.lower() == 'lstm' or model.lower() == 'mlp') and len(features) >= 1): self.output_formatfile += ' modeloutput = modeloutput.round(2)\n' self.output_formatfile += ' modeloutput = modeloutput.to_json(orient=\'records\')\n' self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(modeloutput)}\n' else: self.output_formatfile += ' modeloutput = modeloutput.round(2)' self.output_formatfile += '\n' self.output_formatfile += ' modeloutput = json.dumps(modeloutput.tolist())' self.output_formatfile += '\n' self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":eval(modeloutput)}' self.output_formatfile += '\n' elif(learner_type in ['RecommenderSystem','similarityIdentification','contextualSearch']): self.output_formatfile += '\n' self.output_formatfile += ' df[\'prediction\'] = modeloutput' self.output_formatfile += '\n' self.output_formatfile += ' outputjson = df.to_json(orient=\'records\',double_precision=2)' self.output_formatfile += '\n' self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}' self.output_formatfile += '\n' else: if(modelType == 'Classification' or modelType == 'TLClassification' or modelType == 'anomaly_detection'): if(modelType == 'Classification' or modelType == 'TLClassification' or modelType == 'anomaly_detection'): if(str(output_label) != '{}'): inv_mapping_dict = {v: k for k, v in output_label.items()} self.output_formatfile += ' le_dict = '+ str(inv_mapping_dict) self.output_formatfile += '\n' ''' if(model in ['SGDClassifier']): self.output_formatfile += ' modeloutput = modeloutput.replace({"predict_class": le_dict})' else: self.output_formatfile += ' modeloutput = modeloutput.rename(columns=le_dict)' ''' if modelType != 'anomaly_detection': self.output_formatfile += ' modeloutput = modeloutput.rename(columns=le_dict)' self.output_formatfile += '\n' if(threshold != -1): ''' if(model in ['SGDClassifier']): self.output_formatfile += ' df[\'prediction\'] = np.where(modeloutput[\'probability\'] > '+str(threshold)+',1,0)' self.output_formatfile += '\n' self.output_formatfile += ' df[\'probability\'] = modeloutput[\'probability\']' self.output_formatfile += '\n' self.output_formatfile += ' df[\'remarks\'] = ""' self.output_formatfile += '\n' else: self.output_formatfile += ' predictedData = modeloutput.iloc[:,1]' self.output_formatfile += '\n' self.output_formatfile += ' df[\'prediction\'] = np.where(predictedData > '+str(threshold)+',modeloutput.columns[1],modeloutput.columns[0])' self.output_formatfile += '\n' self.output_formatfile += ' df[\'probability\'] = np.where(df[\'prediction\'] == modeloutput.columns[1],modeloutput.iloc[:,1],modeloutput.iloc[:,0])' self.output_formatfile += '\n' self.output_formatfile += ' df[\'remarks\'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)' self.output_formatfile += '\n' ''' self.output_formatfile += ' predictedData = modeloutput.iloc[:,1]' self.output_formatfile += '\n' self.output_formatfile += ' df[\'prediction\'] = np.where(predictedData > '+str(threshold)+',modeloutput.columns[1],modeloutput.columns[0])' self.output_formatfile += '\n' self.output_formatfile += ' df[\'probability\'] = np.where(df[\'prediction\'] == modeloutput.columns[1],modeloutput.iloc[:,1],modeloutput.iloc[:,0])' self.output_formatfile += '\n' self.output_formatfile += ' df[\'remarks\'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)' self.output_formatfile += '\n' else: ''' if(model in 
['SGDClassifier']): self.output_formatfile += ' df[\'prediction\'] = modeloutput[\'predict_class\']' self.output_formatfile += '\n' self.output_formatfile += ' df[\'probability\'] = ""' self.output_formatfile += '\n' self.output_formatfile += ' df[\'remarks\'] = "NA"' self.output_formatfile += '\n' else: self.output_formatfile += ' df[\'prediction\'] = modeloutput.idxmax(axis=1)' self.output_formatfile += '\n' self.output_formatfile += ' df[\'probability\'] = modeloutput.max(axis=1)' self.output_formatfile += '\n' self.output_formatfile += ' df[\'remarks\'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)' self.output_formatfile += '\n' ''' if modelType == 'anomaly_detection': # if (model.lower()=='autoencoder'): if model.lower() in ['autoencoder']: if (datetimeFeature != '' and datetimeFeature.lower() != 'na'): self.output_formatfile += ' df[modeloutput.columns] = modeloutput\n' self.output_formatfile += ' anomaly_df=df[df[\'anomaly\'] == True]\n' self.output_formatfile += ' anomaly_prediction_df=self.find_point_subsequence_anomalies(datetimeFeature,anomaly_df)\n' self.output_formatfile += ' new_dir = str(Path(__file__).parent.parent/\'data\')\n' self.output_formatfile += ' anomaly_prediction_df.to_csv(f"{new_dir}/anomaly_data.csv")\n' self.output_formatfile += ' try:\n' self.output_formatfile += ' anomaly_prediction_df[datetimeFeature]=pd.to_datetime(anomaly_prediction_df[datetimeFeature])\n' self.output_formatfile += ' df[datetimeFeature]=pd.to_datetime(df[datetimeFeature])\n' self.output_formatfile += ' anomaly_prediction_df.drop("Time_diff",axis=1,inplace=True)\n' self.output_formatfile += ' except:\n' self.output_formatfile += ' pass\n' self.output_formatfile += ' try:\n' self.output_formatfile += ' df_out = pd.merge(df, anomaly_prediction_df, on=df.columns.values.tolist(), how=\'left\')\n' self.output_formatfile += ' df_out[\'anomaly\'].replace([\'None\', \'NaN\', np.nan], "Normal", inplace=True)\n' self.output_formatfile += ' df_out[\'anomalyType\'].replace([\'None\', \'NaN\', np.nan], "Normal", inplace=True)\n' self.output_formatfile += ' df_out.to_csv(f"{new_dir}/overall_ad_output.csv") \n' self.output_formatfile += ' df_out[datetimeFeature]=df_out[datetimeFeature].astype(str) \n' self.output_formatfile += ' df_out.drop("time_diff",axis=1,inplace=True)\n' self.output_formatfile += ' except Exception as e:\n' self.output_formatfile += ' print("anomaly data updated issue",e)\n' self.output_formatfile += ' df_out[datetimeFeature]=df_out[datetimeFeature].astype(str)\n' self.output_formatfile += ' df=df_out \n' else: self.output_formatfile += ' df[modeloutput.columns] = modeloutput\n' elif (model.lower()=='dbscan'): if (datetimeFeature != '' and datetimeFeature.lower() != 'na'): self.output_formatfile += ' df[\'anomaly\'] = modeloutput[\'cluster\']== -1\n' self.output_formatfile += ' anomaly_df=df[df[\'anomaly\'] == True]\n' self.output_formatfile += ' anomaly_prediction_df=self.find_point_subsequence_anomalies(datetimeFeature,anomaly_df)\n' self.output_formatfile += ' new_dir = str(Path(__file__).parent.parent/\'data\')\n' self.output_formatfile += ' try:\n' self.output_formatfile += ' anomaly_prediction_df[datetimeFeature]=pd.to_datetime(anomaly_prediction_df[datetimeFeature])\n' self.output_formatfile += ' df[datetimeFeature]=pd.to_datetime(df[datetimeFeature])\n' self.output_formatfile += ' except:\n' self.output_formatfile += ' pass\n' self.output_formatfile += ' try:\n' self.output_formatfile += ' df_out = pd.merge(df, anomaly_prediction_df, 
on=df.columns.values.tolist(), how=\'left\')\n' self.output_formatfile += ' df_out[\'anomaly\'].replace([\'None\', \'NaN\', np.nan], "Normal", inplace=True)\n' self.output_formatfile += ' df_out.to_csv(f"{new_dir}/overall_ad_output.csv") \n' self.output_formatfile += ' df_out[datetimeFeature]=df_out[datetimeFeature].astype(str)\n' self.output_formatfile += ' except Exception as e:\n' self.output_formatfile += ' print("anomaly data updated.")\n' self.output_formatfile += ' df_out[datetimeFeature]=df_out[datetimeFeature].astype(str)\n' self.output_formatfile += ' df=df_out \n' else: self.output_formatfile += ' df[\'anomaly\'] = modeloutput[\'cluster\']== -1\n' self.output_formatfile += ' df.sort_values(by=[\'anomaly\'], ascending=False, inplace=True)\n' else: self.output_formatfile += ' df[\'prediction\'] = modeloutput' self.output_formatfile += '\n' else: self.output_formatfile += ' df[\'prediction\'] = modeloutput.idxmax(axis=1)' self.output_formatfile += '\n' if learner_type != 'DL': self.output_formatfile += ' df[\'probability\'] = modeloutput.max(axis=1).round(2)' self.output_formatfile += '\n' self.output_formatfile += ' df[\'remarks\'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)' self.output_formatfile += '\n' else: if model == 'COX': self.output_formatfile += '\n' self.output_formatfile += ' modeloutput[0] = modeloutput[0].round(2)' self.output_formatfile += '\n' #self.output_formatfile += ' modeloutput = modeloutput[0].to_json(orient=\'records\',double_precision=2)' #self.output_formatfile += '\n' self.output_formatfile += ' df[\'prediction\'] = modeloutput' self.output_formatfile += '\n' else: self.output_formatfile += ' df[\'prediction\'] = modeloutput[0]' if(learner_type == 'objectDetection'): self.output_formatfile += '\n' self.output_formatfile += ' df[\'prediction\'] = df[\'prediction\']' else: self.output_formatfile += '\n' self.output_formatfile += ' df[\'prediction\'] = df[\'prediction\'].round(2)' self.output_formatfile += '\n' self.output_formatfile += ' outputjson = df.to_json(orient=\'records\',double_precision=2)' self.output_formatfile += '\n' self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}' self.output_formatfile += '\n' self.output_formatfile += ' return(json.dumps(outputjson))' filename = os.path.join(deploy_path,'script','output_format.py') #print(deploy_path) f = open(filename, "wb") self.log.info('-------> Output Mapping File Location :'+filename) f.write(str(self.output_formatfile).encode('utf8')) f.close()
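The class above does not score anything itself; crate_output_format_file emits a small output_format.py into <deploy_path>/script/, and its body depends on learner_type, modelType, threshold and the anomaly-detection branches handled above. As a rough, hedged illustration only, for a plain (non-anomaly) classification model with a label mapping and no probability threshold, the generated file reduces to approximately the sketch below; the le_dict values are illustrative, not taken from this repository.

import json
import pandas as pd

class output_format(object):
    def apply_output_format(self, df, modeloutput):
        # modeloutput: DataFrame of per-class probabilities, one column per encoded class
        modeloutput = round(modeloutput, 2)
        le_dict = {0: 'ham', 1: 'spam'}  # illustrative inverse label mapping
        modeloutput = modeloutput.rename(columns=le_dict)
        df['prediction'] = modeloutput.idxmax(axis=1)
        df['probability'] = modeloutput.max(axis=1).round(2)
        df['remarks'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)
        outputjson = df.to_json(orient='records', double_precision=2)
        outputjson = {"status": "SUCCESS", "data": json.loads(outputjson)}
        return(json.dumps(outputjson))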
inputdrift.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import sys
import pandas as pd
import numpy as np
import scipy
import warnings
import scipy.stats as st
import logging
import json


class inputdrift():

    def __init__(self, conf):
        self.log = logging.getLogger('eion')

    def get_input_drift(self, ndf, hdf, outputfolder):
        selectedColumns = self.features.split(',')
        dataalertcount = 0
        distributionChangeColumns = ""
        distributionChangeMessage = []
        for i in range(0, len(selectedColumns)):
            data1 = hdf[selectedColumns[i]]
            data2 = ndf[selectedColumns[i]]
            if(data1.dtype != "str" and data2.dtype != "str"):
                cumulativeData = pd.concat([data1, data2])
                teststaticValue = teststatic(self, data1, data2)
                if (teststaticValue < 0.05):
                    distributionName1, sse1 = DistributionFinder(self, data1)
                    distributionName2, sse2 = DistributionFinder(self, data2)
                    if(distributionName1 == distributionName2):
                        dataalertcount = dataalertcount
                    else:
                        dataalertcount = dataalertcount + 1
                        distributionChangeColumns = distributionChangeColumns + selectedColumns[i] + ","
                        changedColumn = {}
                        changedColumn['Feature'] = selectedColumns[i]
                        changedColumn['KS_Training'] = teststaticValue
                        changedColumn['Training_Distribution'] = distributionName1
                        changedColumn['New_Distribution'] = distributionName2
                        distributionChangeMessage.append(changedColumn)
                else:
                    dataalertcount = dataalertcount
            else:
                response = "Selected Columns should be Numerical Values"
        if(dataalertcount == 0):
            resultStatus = "Model is working as expected"
        else:
            resultStatus = json.dumps(distributionChangeMessage)
        return(dataalertcount, resultStatus)


def DistributionFinder(self, data):
    try:
        distributionName = ""
        sse = 0.0
        KStestStatic = 0.0
        dataType = ""
        if(data.dtype == "float64"):
            dataType = "Continuous"
        elif(data.dtype == "int"):
            dataType = "Discrete"
        elif(data.dtype == "int64"):
            dataType = "Discrete"
        if(dataType == "Discrete"):
            distributions = [st.bernoulli, st.binom, st.geom, st.nbinom, st.poisson]
            index, counts = np.unique(data.astype(int), return_counts=True)
            if(len(index) >= 2):
                best_sse = np.inf
                y1 = []
                total = sum(counts)
                mean = float(sum(index * counts)) / total
                variance = float((sum(index**2 * counts) - total * mean**2)) / (total - 1)
                dispersion = mean / float(variance)
                theta = 1 / float(dispersion)
                r = mean * (float(theta) / (1 - theta))
                for j in counts:
                    y1.append(float(j) / total)
                pmf1 = st.bernoulli.pmf(index, mean)
                pmf2 = st.binom.pmf(index, len(index), p=mean / len(index))
                pmf3 = st.geom.pmf(index, 1 / float(1 + mean))
                pmf4 = st.nbinom.pmf(index, mean, r)
                pmf5 = st.poisson.pmf(index, mean)
                sse1 = np.sum(np.power(y1 - pmf1, 2.0))
                sse2 = np.sum(np.power(y1 - pmf2, 2.0))
                sse3 = np.sum(np.power(y1 - pmf3, 2.0))
                sse4 = np.sum(np.power(y1 - pmf4, 2.0))
                sse5 = np.sum(np.power(y1 - pmf5, 2.0))
                sselist = [sse1, sse2, sse3, sse4, sse5]
                for i in range(0, len(sselist)):
                    if best_sse > sselist[i] > 0:
                        best_distribution = distributions[i].name
                        best_sse = sselist[i]
            elif (len(index) == 1):
                best_distribution = "Constant Data-No Distribution"
                best_sse = 0.0
            distributionName = best_distribution
            sse = best_sse
        elif(dataType == "Continuous"):
            distributions = [st.uniform, st.expon, st.weibull_max, st.weibull_min, st.chi, st.norm, st.lognorm, st.t, st.gamma, st.beta]
            best_distribution = st.norm.name
            best_sse = np.inf
            datamin = data.min()
            datamax = data.max()
            nrange = datamax - datamin
            y, x = np.histogram(data.astype(float), bins='auto', density=True)
            x = (x + np.roll(x, -1))[:-1] / 2.0
            for distribution in distributions:
                with warnings.catch_warnings():
                    warnings.filterwarnings('ignore')
                    params = distribution.fit(data.astype(float))
                    # Separate parts of parameters
                    arg = params[:-2]
                    loc = params[-2]
                    scale = params[-1]
                    # Calculate fitted PDF and error with fit in distribution
                    pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
                    sse = np.sum(np.power(y - pdf, 2.0))
                    if(best_sse > sse > 0):
                        best_distribution = distribution.name
                        best_sse = sse
            distributionName = best_distribution
            sse = best_sse
    except:
        response = str(sys.exc_info()[0])
        message = 'Job has Failed' + response
        print(message)
    return distributionName, sse


# KStestStatic - pvalue finding
def teststatic(self, data1, data2):
    pValue = 0.0
    try:
        teststatic = st.ks_2samp(data1, data2)
        scipyVersion = scipy.__version__
        if(scipyVersion <= "0.14.1"):
            pValue = teststatic[1]
        else:
            pValue = teststatic.pvalue
    except:
        response = str(sys.exc_info()[0])
        print("Input Drift Job Failed " + response)
    return pValue
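Stripped of the class plumbing, the drift check above is a per-feature two-sample Kolmogorov-Smirnov test, and a distribution is only re-fitted when the p-value falls below 0.05. A minimal, self-contained sketch of that first step on synthetic data:

import numpy as np
import scipy.stats as st

rng = np.random.default_rng(0)
training_col = rng.normal(loc=0.0, scale=1.0, size=1000)   # historical feature values
incoming_col = rng.normal(loc=0.5, scale=1.0, size=1000)   # new feature values

result = st.ks_2samp(training_col, incoming_col)
# A small p-value suggests the two samples come from different distributions,
# i.e. possible input drift on this feature.
if result.pvalue < 0.05:
    print('Drift suspected, p-value = %.4f' % result.pvalue)
else:
    print('No drift detected, p-value = %.4f' % result.pvalue)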
prediction_transformation.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os,sys import platform import json import shutil import logging from pathlib import Path def create_selector_file(self,deploy_path,features,pcaModel_pickle_file,bpca_features,apca_features,textFeatures,nonNumericFeatures,numericalFeatures,profiler,targetFeature, model_type,model,config=None): self.selectorfile += 'import pandas as pd' self.selectorfile += '\n' self.selectorfile += 'import joblib' self.selectorfile += '\n' self.selectorfile += 'import os' self.selectorfile += '\n' self.selectorfile += 'import numpy as np' self.selectorfile += '\n' self.selectorfile += 'class selector(object):' self.selectorfile += '\n' self.selectorfile += ' def apply_selector(self,df):' self.selectorfile += '\n' if pcaModel_pickle_file != '': self.selectorfile += " pcaModel = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','"+pcaModel_pickle_file+"'))" self.selectorfile += '\n' self.selectorfile += ' bpca_features = '+str(bpca_features) self.selectorfile += '\n' self.selectorfile += ' apca_features = '+str(apca_features) self.selectorfile += '\n' self.selectorfile += ' df = pcaModel.transform(df[bpca_features])' self.selectorfile += '\n' self.selectorfile += ' df = pd.DataFrame(df,columns=apca_features)' self.selectorfile += '\n' if(len(features) != 0) and model_type != 'BM25': if model_type.lower()!='anomaly_detection' and model.lower() != 'autoencoder': self.selectorfile += ' df = df['+str(features)+']' self.selectorfile += '\n' self.selectorfile += ' return(df)' filename = os.path.join(deploy_path,'script','selector.py') f = open(filename, "wb") self.log.info('-------> Feature Selector File Location :'+filename) f.write(str(self.selectorfile).encode('utf8')) f.close() featurefile = 'import json' featurefile +='\n' featurefile += 'def getfeatures():' featurefile +='\n' featurefile +=' try:' featurefile +='\n' featurelist = [] if 'profiler' in config: if 'input_features_type' in config['profiler']: inputfeatures = config['profiler']['input_features_type'] for x in inputfeatures: featurelt={} featurelt['feature'] = x print(x,inputfeatures[x]) if x == targetFeature: featurelt['Type'] = 'Target' else: if inputfeatures[x] in ['int','int64','float','float64']: featurelt['Type'] = 'Numeric' elif inputfeatures[x] == 'object': featurelt['Type'] = 'Text' elif inputfeatures[x] == 'category': featurelt['Type'] = 'Category' else: featurelt['Type'] = 'Unknown' featurelist.append(featurelt) featurefile +=' features = '+str(featurelist) featurefile +='\n' featurefile +=' outputjson = {"status":"SUCCESS","features":features}' featurefile +='\n' featurefile +=' output = json.dumps(outputjson)' featurefile +='\n' featurefile +=' print("Features:",output)' featurefile +='\n' featurefile +=' return(output)' featurefile +='\n' featurefile +=' except Exception as e:' featurefile +='\n' featurefile +=' output = {"status":"FAIL","message":str(e).strip(\'"\')}' featurefile +='\n' featurefile +=' print("Features:",json.dumps(output))' featurefile +='\n' featurefile +=' return 
(json.dumps(output))' featurefile +='\n' featurefile +='if __name__ == "__main__":' featurefile +='\n' featurefile +=' output = getfeatures()' filename = os.path.join(deploy_path,'featureslist.py') f = open(filename, "wb") f.write(str(featurefile).encode('utf8')) f.close() def create_init_function_for_classification(self,modelfile,classes,learner_type,scoreParam,loss_matrix,optimizer,preprocessing_pipe,modelName,model_type,imageconfig): self.modelfile += ' def __init__(self):' self.modelfile += '\n' if (learner_type == 'ML' and model_type.lower()=='anomaly_detection' and modelName.lower()=="autoencoder"): modelfile=modelfile.replace('.sav','') self.modelfile+=" self.model = tf.keras.models.load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))" self.modelfile += '\n' elif(learner_type == 'TextDL' or learner_type == 'DL'): if modelName.lower() == 'googlemodelsearch': self.modelfile += ' import autokeras as ak' self.modelfile += '\n' self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','modelsearch_rootdir','saved_model_onnx.onnx'))" self.modelfile += '\n' else: if scoreParam == 'recall': self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'recall': recall_m},compile=False)" self.modelfile += '\n' self.modelfile += ' self.model.compile(loss=\''+loss_matrix+'\',optimizer=\''+optimizer+'\', metrics=[recall_m])' self.modelfile += '\n' elif scoreParam == 'precision': self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'precision': precision_m},compile=False)" self.modelfile += '\n' self.modelfile += ' self.model.compile(loss=\''+loss_matrix+'\',optimizer=\''+optimizer+'\', metrics=[precision_m])' self.modelfile += '\n' elif scoreParam == 'roc_auc': self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),compile=False)" self.modelfile += '\n' self.modelfile += ' self.model.compile(loss=\''+loss_matrix+'\',optimizer=\''+optimizer+'\', metrics=[tf.keras.metrics.AUC()])' self.modelfile += '\n' elif scoreParam == 'f1_score': self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'f1_score': f1_m},compile=False)" self.modelfile += '\n' self.modelfile += ' self.model.compile(loss=\''+loss_matrix+'\',optimizer=\''+optimizer+'\', metrics=[f1_m])' self.modelfile += '\n' elif scoreParam == 'r2': self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'r2': r_square},compile=False)" self.modelfile += '\n' self.modelfile += ' self.model.compile(loss=\''+loss_matrix+'\',optimizer=\''+optimizer+'\', metrics=[r_square])' self.modelfile += '\n' elif scoreParam == 'rmse': self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'rmse': rmse_m},compile=False)" self.modelfile += '\n' self.modelfile += ' self.model.compile(loss=\''+loss_matrix+'\',optimizer=\''+optimizer+'\', metrics=[rmse_m])' self.modelfile += '\n' elif scoreParam == 'mse': self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))" self.modelfile += '\n' elif scoreParam == 'mae': self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))" 
self.modelfile += '\n' elif scoreParam == 'accuracy': self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))" self.modelfile += '\n' else: self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))" self.modelfile += '\n' elif(learner_type == 'Text Similarity'): self.modelfile += " self.preprocessing = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','"+preprocessing_pipe+"'))" self.modelfile += '\n' self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'), custom_objects={'cosine_distance': cosine_distance, 'cos_dist_output_shape': cos_dist_output_shape})" self.modelfile += '\n' elif(learner_type in ['similarityIdentification','contextualSearch']): if scoreParam == 'VectorDB Cosine': vectorfiledbname = 'trainingdataVecDB' self.modelfile += f"\ \n persist_directory = os.path.join(os.path.dirname(__file__),'..','data')\ \n client = chromadb.PersistentClient(path=persist_directory)\ \n self.collection_name = '{vectorfiledbname}'\ \n self.collection = client.get_collection(self.collection_name)\n" else: self.modelfile += " self.train_input = pd.read_csv(os.path.join(os.path.dirname(__file__),'..','data','trainingdata.csv'))\n\n" elif(learner_type == 'ImageClassification'): self.modelfile += ' self.config='+str(imageconfig) self.modelfile += '\n' if(modelName.lower() == 'densenet'): self.modelfile += ' baseModel = tf.keras.applications.DenseNet121(weights="imagenet", include_top=False, input_tensor=Input(shape=(self.config[\'img_width\'],self.config[\'img_height\'],self.config[\'img_channel\'])))' else: self.modelfile += ' baseModel = tensorflow.keras.applications.InceptionV3(weights="imagenet", include_top=False, input_tensor=Input(shape=(self.config[\'img_width\'],self.config[\'img_height\'],self.config[\'img_channel\'])))' self.modelfile += '\n' self.modelfile += ' headModel = baseModel.output' self.modelfile += '\n' self.modelfile += ' headModel = Flatten(name="flatten")(headModel)' self.modelfile += '\n' self.modelfile += ' headModel = Dense(1024, activation=\'relu\')(headModel)' self.modelfile += '\n' self.modelfile += ' headModel = Dropout(0.5)(headModel)' self.modelfile += '\n' self.modelfile += ' headModel = Dense(2, activation=\'sigmoid\')(headModel)' self.modelfile += '\n' self.modelfile += ' headModel = self.model = Model(inputs=baseModel.input, outputs=headModel)' self.modelfile += '\n' self.modelfile += ' opt = Adam(lr=self.config[\'lr\'])' self.modelfile += '\n' self.modelfile += ' self.model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])' self.modelfile += '\n' self.modelfile += " self.model.load_weights(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))" self.modelfile += '\n' elif(learner_type == 'objectDetection'): self.modelfile += " self.MODEL_LOCATION = os.path.join(os.path.dirname(__file__),'..','model')\n" self.modelfile += ' PATH_TO_CFG = self.MODEL_LOCATION+"/export/pipeline.config"\n' self.modelfile += ' PATH_TO_CKPT = self.MODEL_LOCATION+"/export/checkpoint/"\n' self.modelfile += ' PATH_TO_LABELS = self.MODEL_LOCATION+"/export/label_map.pbtxt"\n' self.modelfile += ' configs = config_util.get_configs_from_pipeline_file(PATH_TO_CFG)\n' self.modelfile += ' self.detection_model = model_builder.build(model_config=configs["model"], is_training=False)\n' self.modelfile += ' ckpt = tf.compat.v2.train.Checkpoint(model=self.detection_model)\n' 
self.modelfile += ' ckpt.restore(os.path.join(PATH_TO_CKPT, "ckpt-0")).expect_partial()\n' self.modelfile += ' self.category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS,\ use_display_name=True)\n' elif learner_type == 'TS' and (modelName.lower() == 'lstm' or modelName.lower() == 'mlp'): self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))" self.modelfile += '\n' elif modelName.lower() == 'neural architecture search': self.modelfile += ' import autokeras as ak' self.modelfile += '\n' self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects=ak.CUSTOM_OBJECTS)" self.modelfile += '\n' else: self.modelfile += " self.model = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))" self.modelfile += '\n' def create_predict(self,learner_type,method,model,model_type,threshold,firstDocFeature,secondDocFeature,padding_length,optimizationmethod,sessonal_freq,additional_regressors,feature,modelFeatures,indexFeature,lag_order,scalertransformationFile,datetimeFeature,scoreParam=None): self.modelfile += ' def predict(self,X,features_names):' self.modelfile += '\n' if (learner_type == 'ML' and model_type.lower()=='anomaly_detection' and model.lower()=="autoencoder"): self.modelfile += f" X=X[{feature}]\n" self.modelfile += f" X = np.asarray(X).astype('float32')\n" self.modelfile += f" reconstructed = self.model.predict(X)\n" self.modelfile += f" predict_loss = tf.keras.losses.mae(reconstructed,X)\n" self.modelfile += ' max_threshold = np.mean(predict_loss) + 2*np.std(predict_loss)\n' self.modelfile += ' min_threshold = np.mean(predict_loss) - 2*np.std(predict_loss)\n' self.modelfile += ' prediction_df = pd.DataFrame()\n' self.modelfile += ' prediction_df["loss"] = predict_loss\n' self.modelfile += ' prediction_df["max_threshold"] = max_threshold\n' self.modelfile += ' prediction_df["min_threshold"] = min_threshold\n' self.modelfile += ' prediction_df["anomaly"] = np.where((prediction_df["loss"] > prediction_df["max_threshold"]) | (prediction_df["loss"] <= prediction_df["min_threshold"]), True, False)\n' self.modelfile += ' return prediction_df\n' elif(learner_type == 'RecommenderSystem'): self.modelfile += ' predictions = []' self.modelfile += '\n' self.modelfile += ' for index,row in X.iterrows():' self.modelfile += '\n' self.modelfile += ' score = self.model.predict(int(row["uid"]),int(row["iid"]))' self.modelfile += '\n' self.modelfile += ' predictions.append(score.est)' self.modelfile += '\n' self.modelfile += ' return predictions' elif(learner_type in ['similarityIdentification','contextualSearch']): tfeatures = list(modelFeatures.split(",")) if indexFeature != '' and indexFeature != 'NA': ifeatures = indexFeature.split(",") for ifes in ifeatures: if ifes not in tfeatures: tfeatures.append(ifes) if model_type == 'BM25': self.modelfile += f"\n\ tokenized_corpus =[doc.split(' ') for doc in self.train_input.tokenize]\n\ bm25 = BM25Okapi(tokenized_corpus)\n\ tokenized_query = [doc.split(' ') for doc in X.tokenize]\n\ logcnt = 5\n\ output = []\n\ for query in tokenized_query:\n\ doc_scores = bm25.get_scores(query)\n\ related_docs_indices = np.argsort(doc_scores)[::-1][:logcnt]\n\ x = self.train_input[{tfeatures}].loc[self.train_input.index[related_docs_indices]]\n\ x['Score'] = doc_scores[related_docs_indices]\n\ x['Score'] = round(x['Score'],2).astype(str)+'%'\n\ output.append(x)\n\ return output\n" elif scoreParam == 
'VectorDB Cosine': featuresVecDB = modelFeatures.split(",") self.modelfile += ' logcnt = 5\n' self.modelfile += f" columns = {featuresVecDB}\n" self.modelfile += f"\ \n output = []\ \n for rowindex, row in X.iterrows():\ \n queryembedding = X.iloc[rowindex:rowindex+1].to_numpy()\ \n results = self.collection.query(\ \n query_embeddings=queryembedding.tolist(),\ \n n_results=logcnt\ \n )\ \n x = pd.DataFrame(columns=columns)\ \n for i in range(0, len(results['ids'][0])):\ \n documentAry = results['documents'][0][i]\ \n documentAry = documentAry.split(' ~&~ ')\ \n for j in range(0, len(documentAry)):\ \n x.at[i,columns[j]] = documentAry[j]\ \n x.at[i,'Score'] = results['distances'][0][i]\ \n output.append(x)\ \n return output" else: self.modelfile += ' columns = self.train_input.columns.tolist()\n' self.modelfile += ' logcnt = 5\n' self.modelfile += f" train_input = self.train_input[{tfeatures}]\n" for tf in tfeatures: self.modelfile += f" columns.remove('{tf}')\n" self.modelfile += f"\ \n results = cosine_similarity(self.train_input[columns],X)\ \n output = []\ \n for i in range(results.shape[1]):\ \n related_docs_indices = results[:,i].argsort(axis=0)[:-(int(logcnt) + 1):-1]\ \n x=self.train_input[{tfeatures}].loc[self.train_input.index[related_docs_indices]]\ \n scores = []\ \n for j in range(0,logcnt):\ \n scores.append(str(round((results[related_docs_indices][j][i])*100))+'%')\ \n x['Score'] = scores\ \n output.append(x)\ \n return output" elif(learner_type == 'Text Similarity'): self.modelfile += ' X["'+firstDocFeature+'"] = X["'+firstDocFeature+'"].astype(str)' self.modelfile += '\n' self.modelfile += ' X["'+secondDocFeature+'"] = X["'+secondDocFeature+'"].astype(str)' self.modelfile += '\n' self.modelfile += ' test_sentence1 = self.preprocessing.texts_to_sequences(X["'+firstDocFeature+'"].values)' self.modelfile += '\n' self.modelfile += ' test_sentence2 = self.preprocessing.texts_to_sequences(X["'+secondDocFeature+'"].values)' self.modelfile += '\n' self.modelfile += ' test_sentence1 = pad_sequences(test_sentence1, maxlen='+str(padding_length)+', padding=\'post\')' self.modelfile += '\n' self.modelfile += ' test_sentence2 = pad_sequences(test_sentence2, maxlen='+str(padding_length)+', padding=\'post\')' self.modelfile += '\n' self.modelfile += ' prediction = self.model.predict([test_sentence1, test_sentence2 ])' self.modelfile += '\n' self.modelfile += ' return(prediction)' self.modelfile += '\n' elif(learner_type == 'ImageClassification'): self.modelfile += ' predictions = []' self.modelfile += '\n' self.modelfile += ' for index, row in X.iterrows(): ' self.modelfile += '\n' self.modelfile += ' img = cv2.imread(row[\'imagepath\'])' self.modelfile += '\n' self.modelfile += ' img = cv2.resize(img, (self.config[\'img_width\'],self.config[\'img_height\']))' self.modelfile += '\n' self.modelfile += ' img = image.img_to_array(img)' self.modelfile += '\n' self.modelfile += ' img = np.expand_dims(img, axis=0)' self.modelfile += '\n' self.modelfile += ' img = img/255' self.modelfile += '\n' self.modelfile += ' prediction = self.model.predict(img)' self.modelfile += '\n' self.modelfile += ' prediction = np.argmax(prediction,axis=1)' self.modelfile += '\n' self.modelfile += ' predictions.append(prediction[0])' self.modelfile += '\n' self.modelfile += ' return(predictions)' self.modelfile += '\n' elif(learner_type == 'objectDetection'): self.modelfile += ' @tf.function\n' self.modelfile += ' def detect_fn(image):\n' self.modelfile += ' image, shapes = self.detection_model.preprocess(image)\n' 
self.modelfile += ' prediction_dict = self.detection_model.predict(image, shapes)\n' self.modelfile += ' detections = self.detection_model.postprocess(prediction_dict, shapes)\n' self.modelfile += ' return detections\n' self.modelfile += ' def load_image_into_numpy_array(path):\n' self.modelfile += ' return np.array(Image.open(path))\n' self.modelfile += ' imageLocation = []\n' self.modelfile += ' for i, row in X.iterrows():\n' self.modelfile += ' if ("confidance" in row) and row["confidance"] <= 1.0:\n' self.modelfile += ' confidance = row["confidance"]\n' self.modelfile += ' else:\n' self.modelfile += ' confidance = 0.8\n' self.modelfile += ' imageName = str(Path(row["imagepath"]).stem)+"_output"+str(Path(row["imagepath"]).suffix)\n' self.modelfile += ' image_np = load_image_into_numpy_array(row["imagepath"])\n' self.modelfile += ' input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)\n' self.modelfile += ' detections = detect_fn(input_tensor)\n' self.modelfile += ' num_detections = int(detections.pop("num_detections"))\n' self.modelfile += ' detections = {key: value[0, :num_detections].numpy()\n\ for key, value in detections.items()}\n' self.modelfile += ' detections["num_detections"] = num_detections\n' self.modelfile += ' detections["detection_classes"] = detections["detection_classes"].astype(np.int64)\n' self.modelfile += ' label_id_offset = 1\n' self.modelfile += ' image_np_with_detections = image_np.copy()\n' self.modelfile += ' viz_utils.visualize_boxes_and_labels_on_image_array(\n\ image_np_with_detections,\n\ detections["detection_boxes"],\n\ detections["detection_classes"]+label_id_offset,\n\ detections["detection_scores"],\n\ self.category_index,\n\ use_normalized_coordinates=True,\n\ max_boxes_to_draw=200,\n\ min_score_thresh=confidance,\n\ agnostic_mode=False)\n' self.modelfile += ' plt.figure()\n' self.modelfile += ' plt.imsave(os.path.join(self.MODEL_LOCATION,imageName), image_np_with_detections)\n' self.modelfile += ' imageLocation.append(os.path.join(self.MODEL_LOCATION,imageName))\n' self.modelfile += ' plt.show()\n' self.modelfile += ' return imageLocation\n' else: if(learner_type == 'DL' and model != 'Neural Network'): self.modelfile += ' X = np.expand_dims(X, axis=2)' self.modelfile += '\n' if(learner_type == 'TextDL'): self.modelfile += ' return pd.DataFrame(np.argmax(self.model.predict(X),axis=1))' self.modelfile += '\n' elif(learner_type == 'TextML'): self.modelfile += ' return pd.DataFrame(self.model.predict_proba(X),columns=self.model.classes_)' self.modelfile += '\n' elif(learner_type == 'DL' and model_type == 'Classification'): self.modelfile += ' X = X.astype(np.float32)' self.modelfile += '\n' self.modelfile += ' return pd.DataFrame(np.argmax(self.model.predict(X),axis=1))' self.modelfile += '\n' else: if(model_type == 'Classification' or model_type == 'TLClassification'): if model == 'Neural Architecture Search': self.modelfile += ' X = X.astype(np.float32)' self.modelfile += '\n' self.modelfile += ' return pd.DataFrame(self.model.predict(X))' self.modelfile += '\n' else: if optimizationmethod == 'genetic': self.modelfile += '\n' self.modelfile += ' try:' self.modelfile += '\n' self.modelfile += ' return pd.DataFrame(self.model.predict_proba(X))' self.modelfile += '\n' self.modelfile += ' except:' self.modelfile += '\n' self.modelfile += ' return pd.DataFrame(self.model.predict(X))' else: self.modelfile += ' X = X.astype(np.float32)' self.modelfile += '\n' if model.lower() == 'deep q network' or model.lower() == 'dueling deep q 
network': self.modelfile += ' q, _ = self.model(np.array(X), step_type=constant([time_step.StepType.FIRST] * np.array(X).shape[0]), training=False)' self.modelfile += '\n' self.modelfile += ' return pd.DataFrame(q.numpy())' else: self.modelfile += ' return pd.DataFrame(self.model.predict_proba(X), columns=self.model.classes_)' self.modelfile += '\n' elif model_type == 'Regression' and model == 'NAS': self.modelfile += \ """ X = X.astype(np.float32) return self.model.predict(X) """ elif(learner_type == 'TS'): if model.lower() == 'fbprophet': self.modelfile += ' sessonal_freq="'+str(sessonal_freq)+'"' self.modelfile += '\n' self.modelfile += ' ts_prophet_future = self.model.make_future_dataframe(periods=int(X["noofforecasts"][0]),freq=sessonal_freq,include_history = False)' self.modelfile += '\n' if (additional_regressors): self.modelfile += '\n' self.modelfile += ' additional_regressors='+str(additional_regressors) self.modelfile += '\n' self.modelfile += ' ts_prophet_future[additional_regressors] = dataFrame[additional_regressors]' self.modelfile += '\n' self.modelfile += ' ts_prophet_future.reset_index(drop=True)' self.modelfile += '\n' self.modelfile += ' ts_prophet_future=ts_prophet_future.dropna()' self.modelfile += '\n' self.modelfile += ' train_forecast = self.model.predict(ts_prophet_future)' self.modelfile += '\n' self.modelfile += ' prophet_forecast_tail=train_forecast[[\'ds\', \'yhat\', \'yhat_lower\',\'yhat_upper\']].tail( int(X["noofforecasts"][0]))' self.modelfile += '\n' self.modelfile += ' return(prophet_forecast_tail)' elif model.lower() == 'lstm' or model.lower() == 'mlp': self.modelfile += ' lag_order='+str(lag_order) self.modelfile += '\n' self.modelfile += ' xt = X.values' self.modelfile += '\n' scalertransformationFile = scalertransformationFile.split('\\')[-1] self.modelfile += ' loaded_scaler_model = joblib.load(os.path.join(os.path.dirname(__file__),\'..\',\'model\',\''+scalertransformationFile+'\'))' self.modelfile += '\n' self.modelfile += ' xt = xt.astype(\'float32\')' self.modelfile += '\n' self.modelfile += ' xt = loaded_scaler_model.transform(xt)' self.modelfile += '\n' self.modelfile += ' noOfPredictions = 10' self.modelfile += '\n' self.modelfile += ' pred_data = xt' self.modelfile += '\n' self.modelfile += ' y_future = []' self.modelfile += '\n' self.modelfile += ' for i in range(noOfPredictions):' self.modelfile += '\n' if len(feature) == 1: self.modelfile += ' pred_data = pred_data[-lag_order:]' self.modelfile += '\n' if model.lower() == 'mlp': self.modelfile += ' pred_data = pred_data.reshape((1,lag_order))' else: self.modelfile += ' pred_data = pred_data.reshape((1,lag_order,1))' self.modelfile += '\n' self.modelfile += ' pred = self.model.predict(pred_data)' self.modelfile += '\n' self.modelfile += ' predoutput = loaded_scaler_model.inverse_transform(pred) ' self.modelfile += '\n' self.modelfile += ' y_future.append(predoutput.flatten()[-1])' self.modelfile += '\n' self.modelfile += ' pred_data = np.append(pred_data,pred)' self.modelfile += '\n' self.modelfile += ' pred = pd.DataFrame(index=range(0,len(y_future)),columns='+str(feature)+')' self.modelfile += '\n' self.modelfile += ' for i in range(0, len(y_future)):' self.modelfile += '\n' self.modelfile += ' pred.iloc[i] = y_future[i]' self.modelfile += '\n' self.modelfile += ' return pred' else: self.modelfile += ' pdata = pred_data[-lag_order:]' self.modelfile += '\n' self.modelfile += ' pdata = pdata.reshape((1,lag_order,'+str(len(feature))+'))' self.modelfile += '\n' self.modelfile += ' pred = 
self.model.predict(pdata)' self.modelfile += '\n' self.modelfile += ' predoutput = loaded_scaler_model.inverse_transform(pred) ' self.modelfile += '\n' self.modelfile += ' y_future.append(predoutput)' self.modelfile += '\n' self.modelfile += ' pred_data = np.append(pred_data,pred,axis=0)' self.modelfile += '\n' self.modelfile += ' pred = pd.DataFrame(index=range(0,len(y_future)),columns='+str(feature)+')' self.modelfile += '\n' self.modelfile += ' for i in range(0, len(y_future)):' self.modelfile += '\n' self.modelfile += ' pred.iloc[i] = y_future[i]' self.modelfile += '\n' self.modelfile += ' return pred' else: self.modelfile += ' return self.model.predict(n_periods=int(X["noofforecasts"][0]))' else: if model == 'KaplanMeierFitter': self.modelfile += '\n' self.modelfile += ' res = self.model.predict(X[\''+feature[0]+'\'].astype(int))' self.modelfile += '\n' self.modelfile += ' if isinstance(res, pd.DataFrame):\n' self.modelfile += ' return res.values.reshape(1,-1)\n' self.modelfile += ' else:\n' self.modelfile += ' return np.array([res])\n' elif model == 'COX': self.modelfile += ' res = []\n' self.modelfile += ' for idx,row in X.iterrows():\n' self.modelfile += ' res.append(self.model.predict_survival_function(X, times=row[self.model.duration_col])[idx].values[0])\n' self.modelfile += ' return pd.DataFrame(res)' #self.modelfile += ' return self.model.predict_survival_function(X, times=X[self.model.duration_col])' self.modelfile += '\n' elif(learner_type == 'DL' and model_type in ['Classification','Regression']): self.modelfile += ' X = X.astype(np.float32)' self.modelfile += '\n' self.modelfile += ' return self.model.predict(X).reshape(1, -1)' self.modelfile += '\n' elif (model_type == 'Clustering' and model == 'DBSCAN'): self.modelfile += ' return self.model.fit_predict(X)' elif(model_type.lower() == 'anomaly_detection' and model.lower() == 'dbscan'): self.modelfile += " pred=self.model.fit_predict(X)\n" self.modelfile += " X.loc[:,'cluster'] = self.model.labels_ \n" self.modelfile += ' return X\n' elif model_type.lower() == 'anomaly_detection': self.modelfile += ' X = X.astype(np.float32)\n' self.modelfile += ' return self.model.predict(X)' else: if model_type != 'Clustering': self.modelfile += ' X = X.astype(np.float32)' self.modelfile += '\n' #self.modelfile += ' return self.model.predict(X).reshape(1, -1)' self.modelfile += \ """ if isinstance(self.model, LatentDirichletAllocation): output = np.matrix(self.model.transform(X)).argmax(axis=1) return output.flatten().tolist() return self.model.predict(X).reshape(1, -1) """
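# Hedged sketch (not emitted verbatim by the generator above): approximately how the plain
# classification branch reads once the string fragments are assembled into a predictor class.
# The class name, model filename and __init__ wiring here are assumptions for illustration;
# only the predict() body mirrors the emitted fragments.
import joblib
import numpy as np
import pandas as pd

class AssembledPredictor:                           # hypothetical name
    def __init__(self, model_path='model.pkl'):     # hypothetical model location
        self.model = joblib.load(model_path)        # a fitted scikit-learn classifier

    def predict(self, X: pd.DataFrame) -> pd.DataFrame:
        X = X.astype(np.float32)
        # class probabilities, one column per class, as in the emitted code
        return pd.DataFrame(self.model.predict_proba(X), columns=self.model.classes_)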
base.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from pathlib import Path
from AION.prediction_package.imports import importModule
from AION.prediction_package.aion_prediction import aionPrediction
from AION.prediction_package.utility import TAB_CHAR
from AION.prediction_package import utility
from AION.prediction_package import common

def file_header(usecase=''):
    return ''

class deployer():
    """
    Base deployer class which can be used to generate the deployment code.
    This class is inherited by deployers specific to each problem type.
    """
    def __init__(self, params={}):
        if not params['paths']['deploy']:
            raise ValueError('Deploy path is not provided')
        self.deploy_path = Path(params['paths']['deploy'])
        if not self.deploy_path.exists():
            self.deploy_path.mkdir(parents=True, exist_ok=True)
        self.name = params.get('problem_type', '')
        self.params = params
        self.importer = importModule()
        self.feature_reducer = False

    def profiler_code(self):
        return common.profiler_code(self.params['profiler'])

    def feature_engg_code(self):
        if self.params['selector'].get('reducer', False):
            code, modules = common.feature_reducer_code(self.params['selector'])
        else:
            code, modules = common.feature_selector_code(self.params['selector'])
        utility.import_modules(self.importer, modules)
        return code

    def training_code(self):
        return common.training_code(self.params['training'])

    def formatter_code(self):
        return ''

    def run(self):
        """
        run function will be called to start the deployment process.
        This function creates the following files:
            inputprofiler.py for preprocessing the input
            aion_predict.py for prediction
            model service file
        """
        code = self.predict_code()
        with open(self.deploy_path/'aion_predict.py', 'w') as f:
            f.write(code)
        profiler_code = self.profiler_code()
        with open(self.deploy_path/'script'/'inputprofiler.py', 'w') as f:
            f.write(profiler_code)
        self.create_model_service()
        self.create_publish_service()
        self.create_idrift()
        self.create_odrift()
        common.create_feature_list(self.params, self.params['features']['target_feat'], self.deploy_path)
        common.requirement_file(self.deploy_path, self.params['training']['algo'], self.params['features']['text_feat'])
        common.create_readme_file(self.deploy_path, self.params['training']['model_file'], self.params['features']['input_feat'])
        self.create_utils_folder()

    def predict_code(self):
        imported_modules = [
            {'module': 'json', 'mod_from': None, 'mod_as': None},
            {'module': 'joblib', 'mod_from': None, 'mod_as': None},
            {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
            {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
            {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}
        ]
        utility.import_modules(self.importer, imported_modules)
        self.importer.addLocalModule(module='inputprofiler', mod_from='script.inputprofiler')
        code_text = ""
        code_text += self.feature_engg_code()
        code_text += self.training_code()
        code_text += self.formatter_code()
        code_text += common.main_code()
        code = file_header()
        code += self.importer.getCode()
        return code + code_text

    def create_model_service(self):
        service_name = '{}{}{}'.format(self.params['usecase_name'], '_' if self.params['usecase_ver'] != '' else '', self.params['usecase_ver'])
        obj = aionPrediction()
        obj.create_model_service(self.deploy_path, service_name, self.name)

    def create_publish_service(self):
        obj = aionPrediction()
        obj.create_publish_service(self.params['paths']['usecase'], self.params['usecase_name'], self.params['usecase_ver'], self.name)

    def create_idrift(self):
        pass

    def create_odrift(self):
        pass

    def create_utils_folder(self):
        common.create_util_folder(self.deploy_path)
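# Hedged usage sketch (not part of the module above): how a problem-specific deployer built
# on this base class is typically driven. The params dictionary only illustrates the keys
# read by deployer(); the values are placeholders, and in AION the dict is assembled by the
# training pipeline rather than written by hand.
from AION.prediction_package.base import deployer

params = {
    'problem_type': 'classification',
    'paths': {'deploy': '/tmp/aion_deploy', 'usecase': '/tmp/aion_usecase'},  # placeholder paths
    'usecase_name': 'demo_usecase',
    'usecase_ver': '1',
    'profiler': {},   # populated by the profiler stage
    'selector': {},   # populated by the feature-selection stage
    'training': {'algo': 'LogisticRegression', 'model_file': 'model.pkl'},    # placeholders
    'features': {'target_feat': 'label', 'input_feat': ['f1', 'f2'], 'text_feat': []},
}
obj = deployer(params)      # in practice a subclass overriding profiler_code/training_code
code = obj.predict_code()   # returns the assembled aion_predict.py source as a string
# obj.run()                 # additionally writes aion_predict.py, inputprofiler.py and service files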
forecasting.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' from pathlib import Path from AION.prediction_package.imports import importModule from AION.prediction_package.aion_prediction import aionPrediction from AION.prediction_package.utility import TAB_CHAR from AION.prediction_package import utility from AION.prediction_package.base import deployer from AION.prediction_package import common import numpy as np def get_deployer( params): if params['training']['algo'] == 'ARIMA': return arima(params) elif params['training']['algo'] == 'LSTM': return lstm(params) elif params['training']['algo'] == 'ENCODER_DECODER_LSTM_MVI_UVO': return lstmencdec_mviuvo(params) elif params['training']['algo'] == 'MLP': return mlp(params) elif params['training']['algo'] == 'VAR': return var(params) elif params['training']['algo'] == 'FBPROPHET': return fbprophet(params) else: raise ValueError(f"Algorithm {params['training']['algo']} for time series forecasting is not supported") def _profiler_code(params, importer): """ This will create the profiler file based on the config file. separated file is created as profiler is required for input drift also. """ imported_modules = [ {'module': 'json', 'mod_from': None, 'mod_as': None}, {'module': 'scipy', 'mod_from': None, 'mod_as': None}, {'module': 'joblib', 'mod_from': None, 'mod_as': None}, {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None} ] utility.import_modules(importer, imported_modules) if 'code' in params['profiler'].get('preprocess',{}).keys(): code = params['profiler']['preprocess']['code'] else: code = "" code += """ class inputprofiler(): """ init_code = """ def __init__(self): """ init_code += """ # preprocessing preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl' if not preprocess_path.exists(): raise ValueError(f'Preprocess model file not found: {preprocess_path}') self.profiler = joblib.load(preprocess_path) """ run_code = """ def run(self,df): df = df.replace(r'^\s*$', np.NaN, regex=True) """ if 'code' in params['profiler'].get('preprocess',{}).keys(): run_code += """ df = preprocess( df)""" if params['profiler'].get('unpreprocessed_columns'): run_code += f""" unpreprocessed_data = df['{params['profiler']['unpreprocessed_columns'][0]}'] df.drop(['{params['profiler']['unpreprocessed_columns'][0]}'], axis=1,inplace=True) """ if params['profiler'].get('force_numeric_conv'): run_code += f""" df[{params['profiler']['force_numeric_conv']}] = df[{params['profiler']['force_numeric_conv']}].apply(pd.to_numeric,errors='coerce')""" run_code += _profiler_main_code(params) if params['profiler'].get('unpreprocessed_columns'): run_code += f""" df['{params['profiler'].get('unpreprocessed_columns')[0]}'] = unpreprocessed_data """ run_code += """ return df """ utility.import_modules(importer, imported_modules) import_code = importer.getCode() return import_code + code + init_code + run_code def _profiler_main_code(params): code = f""" df = 
self.profiler.transform(df) columns = {params['profiler']['output_features']} if isinstance(df, scipy.sparse.spmatrix): df = pd.DataFrame(df.toarray(), columns=columns) else: df = pd.DataFrame(df, columns=columns) """ return code class arima( deployer): def __init__(self, params={}): super().__init__( params) self.name = 'timeseriesforecasting' def profiler_code( self): imported_modules = [ {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, ] importer = importModule() utility.import_modules(importer, imported_modules) code = """ class inputprofiler(): def __init__(self): pass def run( self,df): df = df.replace(r'^\s*$', np.NaN, regex=True) return df[['noofforecasts']] """ return importer.getCode() + code def feature_engg_code(self): self.importer.addModule(module='pandas',mod_as='pd') return f""" class selector(): def __init__(self): pass def run(self, df): return df """ def training_code( self): self.importer.addModule(module='pandas',mod_as='pd') self.importer.addModule(module='Path',mod_from='pathlib') self.importer.addModule(module='numpy',mod_as='np') self.importer.addModule(module='joblib') return f""" class trainer(): def __init__(self): model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}" if not model_file.exists(): raise ValueError(f'Trained model file not found: {{model_file}}') self.model = joblib.load(model_file) def run(self,df): return self.model.predict(n_periods=int(df["noofforecasts"][0])) """ def formatter_code(self): self.importer.addModule('json') self.importer.addModule('pandas', mod_as='pd') return """ class output_format(): def __init__( self): pass def run(self,raw_df,df): df = df.round(2) df = json.dumps(df.tolist()) outputjson = {"status":"SUCCESS","data":eval(df)} return(json.dumps(outputjson)) """ class lstm( deployer): def __init__(self, params={}): super().__init__( params) self.name = 'timeseriesforecasting' def profiler_code(self): importer = importModule() return _profiler_code( self.params, importer) def training_code( self): self.importer.addModule(module='pandas',mod_as='pd') self.importer.addModule(module='Path',mod_from='pathlib') code = f""" class trainer(): """ init_code, run_code = self._get_train_code() return code + init_code + run_code def _get_train_code(self): self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') init_code = f""" def __init__( self): model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}" if not model_file.exists(): raise ValueError(f'Trained model file not found: {{model_file}}') self.model = load_model(model_file) """ run_code = f""" def run(self, df): lag_order={self.params['training']['lag_order']} xt = df.values scaler_file = (Path(__file__).parent/"model")/"{self.params['training']['scaler_file']}" if not scaler_file.exists(): raise ValueError(f'Scaling file not found: {{scaler_file}}') loaded_scaler_model = joblib.load(scaler_file) xt = xt.astype('float32') xt = loaded_scaler_model.transform(xt) noOfPredictions = 10 pred_data = xt y_future = [] for i in range(noOfPredictions): """ if len(self.params['selector']['output_features']) == 1: run_code += f""" pred_data = pred_data[-lag_order:] pred_data = pred_data.reshape((1,lag_order,1)) pred = self.model.predict(pred_data) predoutput = loaded_scaler_model.inverse_transform(pred) y_future.append(predoutput.flatten()[-1]) pred_data = np.append(pred_data,pred) pred = 
pd.DataFrame(index=range(0,len(y_future)),columns={self.params['selector']['output_features']}) for i in range(0, len(y_future)): pred.iloc[i] = y_future[i] return pred """ else: run_code += f""" pdata = pred_data[-lag_order:] pdata = pdata.reshape((1,lag_order,{len(self.params['selector']['output_features'])})) pred = self.model.predict(pdata) predoutput = loaded_scaler_model.inverse_transform(pred) y_future.append(predoutput) pred_data = np.append(pred_data,pred,axis=0) pred = pd.DataFrame(index=range(0,len(y_future)),columns={self.params['selector']['output_features']}) for i in range(0, len(y_future)): pred.iloc[i] = y_future[i] return pred """ return init_code, run_code def formatter_code(self): self.importer.addModule('json') self.importer.addModule('pandas', mod_as='pd') return """ class output_format(): def __init__( self): pass def run(self,raw_df,df): df = df.round(2) df = df.to_json(orient='records') outputjson = {"status":"SUCCESS","data":json.loads(df)} return(json.dumps(outputjson)) """ class lstmencdec_mviuvo( deployer): def __init__(self, params={}): super().__init__( params) self.name = 'timeseriesforecasting' outputFeatrues = params['profiler']['output_features'] self.targetColIndx = outputFeatrues.index(params['features']['target_feat']) selectedColDict = params['selector']['output_features'] self.selectedCols = list() for col in selectedColDict: self.selectedCols.append(col) def profiler_code(self): importer = importModule() return _profiler_code( self.params, importer) def training_code( self): self.importer.addModule(module='pandas',mod_as='pd') self.importer.addModule(module='Path',mod_from='pathlib') code = f""" class trainer(): """ init_code, run_code = self._get_train_code() return code + init_code + run_code def _get_train_code(self): self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') init_code = f""" def __init__( self): model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}" if not model_file.exists(): raise ValueError(f'Trained model file not found: {{model_file}}') self.model = load_model(model_file) """ run_code = f""" def run(self, df): targetColIndx = {self.targetColIndx} lag_order={self.params['training']['lag_order']} xt = df.values scaler_file = (Path(__file__).parent/"model")/"{self.params['training']['scaler_file']}" if not scaler_file.exists(): raise ValueError(f'Scaling file not found: {{scaler_file}}') loaded_scaler_model = joblib.load(scaler_file) xt = xt.astype('float32') xt = loaded_scaler_model.transform(xt) noOfPredictions = 10 pred_data = xt y_future = [] pdata = pred_data[-lag_order:] pdata = pdata.reshape((1,lag_order,{len(self.params['selector']['output_features'])})) pred = self.model.predict(pdata) pred_1d = pred.ravel() pdata_2d = pdata.ravel().reshape(len(pdata) * lag_order, {len(self.params['selector']['output_features'])}) pdata_2d[:,targetColIndx] = pred_1d pred_2d_inv = loaded_scaler_model.inverse_transform(pdata_2d) predout = pred_2d_inv[:, targetColIndx] predout = predout.reshape(len(pred_1d),1) pred = pd.DataFrame(index=range(0,len(predout)),columns=['{self.params['features']['target_feat']}']) for i in range(0, len(predout)): pred.iloc[i] = predout[i] return pred """ return init_code, run_code def feature_engg_code(self): self.importer.addModule(module='pandas',mod_as='pd') return f""" class selector(): def __init__(self): pass def run(self, df): return df[{self.selectedCols}] """ def formatter_code(self): self.importer.addModule('json') 
self.importer.addModule('pandas', mod_as='pd') return """ class output_format(): def __init__( self): pass def run(self,raw_df,df): df = df.round(2) df = df.to_json(orient='records') outputjson = {"status":"SUCCESS","data":json.loads(df)} return(json.dumps(outputjson)) """ class mlp( lstm): def __init__(self, params={}): super().__init__( params) self.name = 'timeseriesforecasting' def training_code( self): self.importer.addModule(module='pandas',mod_as='pd') self.importer.addModule(module='Path',mod_from='pathlib') code = f""" class trainer(): """ init_code, run_code = self._get_train_code() return code + init_code + run_code def _get_train_code(self): self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models') init_code = f""" def __init__( self): model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}" if not model_file.exists(): raise ValueError(f'Trained model file not found: {{model_file}}') self.model = load_model(model_file)""" run_code = f""" def run(self, df): lag_order={self.params['training']['lag_order']} xt = df.values scaler_file = (Path(__file__).parent/"model")/"{self.params['training']['scaler_file']}" if not scaler_file.exists(): raise ValueError(f'Scaling file not found: {{scaler_file}}') loaded_scaler_model = joblib.load(scaler_file) xt = xt.astype('float32') xt = loaded_scaler_model.transform(xt) noOfPredictions = 10 pred_data = xt y_future = [] for i in range(noOfPredictions): """ if len(self.params['selector']['output_features']) == 1: run_code += f""" pred_data = pred_data[-lag_order:] pred_data = pred_data.reshape((1,lag_order)) pred = self.model.predict(pred_data) predoutput = loaded_scaler_model.inverse_transform(pred) y_future.append(predoutput.flatten()[-1]) pred_data = np.append(pred_data,pred) pred = pd.DataFrame(index=range(0,len(y_future)),columns={self.params['selector']['output_features']}) for i in range(0, len(y_future)): pred.iloc[i] = y_future[i] return pred """ else: run_code += f""" pdata = pred_data[-lag_order:] pdata = pdata.reshape((1,lag_order,{len(self.params['selector']['output_features'])})) pred = self.model.predict(pdata) predoutput = loaded_scaler_model.inverse_transform(pred) y_future.append(predoutput) pred_data = np.append(pred_data,pred,axis=0) pred = pd.DataFrame(index=range(0,len(y_future)),columns={self.params['selector']['output_features']}) for i in range(0, len(y_future)): pred.iloc[i] = y_future[i] return pred """ return init_code, run_code class var( deployer): def __init__(self, params={}): super().__init__( params) self.name = 'timeseriesforecasting' def profiler_code(self): importer = importModule() code = _profiler_code( self.params, importer) return code def feature_engg_code(self): self.importer.addModule(module='pandas',mod_as='pd') return f""" class selector(): def __init__(self): pass def run(self, df): return df[{self.params['selector']['output_features']}] """ def training_code( self): self.importer.addModule(module='joblib') return f""" class trainer(): def __init__( self): model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}" if not model_file.exists(): raise ValueError(f'Trained model file not found: {{model_file}}') self.model = joblib.load(model_file) def run(self,df): lag_order = self.model.k_ar return self.model.forecast(df.values[-lag_order:],steps={self.params['training']['no_of_prediction']}) """ def formatter_code(self): self.importer.addModule('json') self.importer.addModule('pandas', mod_as='pd') return f""" class 
output_format(): def __init__( self): pass def invertTransformation(self,predictions): datasetdf = pd.read_csv((Path(__file__).parent/"data")/"trainingdata.csv") dictDiffCount = {self.params['training']['dictDiffCount']} target_features = "{self.params['features']['target_feat']}" columns = target_features.split(',') pred = pd.DataFrame(index=range(0,len(predictions)),columns=columns) for j in range(0,len(columns)): for i in range(0, len(predictions)): pred.iloc[i][j] = round(predictions[i][j],2) prediction = pred for col in columns: if col in dictDiffCount: if dictDiffCount[col]==2: prediction[col] = (datasetdf[col].iloc[-1]-datasetdf[col].iloc[-2]) + prediction[col].cumsum() prediction[col] = datasetdf[col].iloc[-1] + prediction[col].cumsum() prediction = pred return(prediction) def run(self,raw_df,df): df = self.invertTransformation(df) df = df.to_json(orient='records',double_precision=2) outputjson = {{"status":"SUCCESS","data":json.loads(df)}} return(json.dumps(outputjson)) """ class fbprophet( deployer): def __init__(self, params={}): super().__init__( params) self.name = 'timeseriesforecasting' def profiler_code( self): imported_modules = [ {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, ] importer = importModule() utility.import_modules(importer, imported_modules) code = """ class inputprofiler(): def __init__(self): pass def run( self,df): df = df.replace(r'^\s*$', np.NaN, regex=True) return df[['noofforecasts']] """ return importer.getCode() + code def feature_engg_code(self): self.importer.addModule(module='pandas',mod_as='pd') return f""" class selector(): def __init__(self): pass def run(self, df): return df """ def training_code( self): self.importer.addModule(module='pandas',mod_as='pd') self.importer.addModule(module='Path',mod_from='pathlib') self.importer.addModule(module='joblib') code = f""" class trainer(): def __init__(self): model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}" if not model_file.exists(): raise ValueError(f'Trained model file not found: {{model_file}}') self.model = joblib.load(model_file) """ code += f""" def run(self,df): sessonal_freq = '{self.params['training']['sessonal_freq']}' ts_prophet_future = self.model.make_future_dataframe(periods=int(df["noofforecasts"][0]),freq=sessonal_freq,include_history = False) """ if (self.params['training']['additional_regressors']): code += f""" additional_regressors={self.params['training']['additional_regressors']} ts_prophet_future[additional_regressors] = dataFrame[additional_regressors] ts_prophet_future.reset_index(drop=True) ts_prophet_future=ts_prophet_future.dropna() """ code += """ train_forecast = self.model.predict(ts_prophet_future) prophet_forecast_tail=train_forecast[[\'ds\', \'yhat\', \'yhat_lower\',\'yhat_upper\']].tail( int(df["noofforecasts"][0])) return(prophet_forecast_tail)""" return code def formatter_code(self): self.importer.addModule('json') self.importer.addModule('pandas', mod_as='pd') return """ class output_format(): def __init__( self): pass def run(self,raw_df,df): df = df.to_json(orient='records') outputjson = {"status":"SUCCESS","data":json.loads(df)} return(json.dumps(outputjson)) """
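# Hedged usage sketch (not part of the module above): dispatching to a forecasting deployer
# by algorithm name. Only the keys shown are read on this path; the values are placeholders
# supplied purely for illustration.
from AION.prediction_package.forecasting import get_deployer

params = {
    'paths': {'deploy': '/tmp/ts_deploy', 'usecase': '/tmp/ts_usecase'},  # placeholder paths
    'training': {'algo': 'ARIMA', 'model_file': 'arima_model.pkl'},       # placeholder model file
    'profiler': {}, 'selector': {}, 'features': {},                       # filled by the pipeline
}
obj = get_deployer(params)           # returns the arima deployer for algo == 'ARIMA'
trainer_src = obj.training_code()    # emits the trainer class embedded in aion_predict.py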
local_pipeline.py
import docker import json import logging def read_json(file_path): data = None with open(file_path,'r') as f: data = json.load(f) return data def run_pipeline(inputconfig): inputconfig = json.loads(inputconfig) logfilepath = inputconfig['logfilepath'] logging.basicConfig(level=logging.INFO,filename =logfilepath) usecasename = inputconfig['usecase'] logging.info("UseCaseName :"+str(usecasename)) version = inputconfig['version'] logging.info("version :"+str(version)) config = inputconfig['dockerlist'] persistancevolume = inputconfig['persistancevolume'] logging.info("PersistanceVolume :"+str(persistancevolume)) datasetpath = inputconfig['datasetpath'] logging.info("DataSet Path :"+str(datasetpath)) config = read_json(config) client = docker.from_env() inputconfig = {'modelName':usecasename,'modelVersion':str(version),'dataLocation':datasetpath} inputconfig = json.dumps(inputconfig) inputconfig = inputconfig.replace('"', '\\"') logging.info("===== Model Monitoring Container Start =====") outputStr = client.containers.run(config['ModelMonitoring'],'python code.py -i'+datasetpath,volumes=[persistancevolume+':/aion']) outputStr = outputStr.decode('utf-8') logging.info('ModelMonitoring: '+str(outputStr)) print('ModelMonitoring: '+str(outputStr)) logging.info("===== ModelMonitoring Stop =====") logging.info("===== Data Ingestion Container Start =====") outputStr = client.containers.run(config['DataIngestion'],'python code.py',volumes=[persistancevolume+':/aion']) outputStr = outputStr.decode('utf-8') logging.info('DataIngestion: '+str(outputStr)) print('DataIngestion: '+str(outputStr)) logging.info("===== Data Ingestion Container Stop =====") outputStr = outputStr.strip() decoded_data = json.loads(outputStr) status = decoded_data['Status'] if status != 'Success': output = {'Status':'Error','Msg':'Data Ingestion Fails'} logging.info("===== Transformation Container Start =====") outputStr = client.containers.run(config['DataTransformation'],'python code.py',volumes=[persistancevolume+':/aion']) outputStr = outputStr.decode('utf-8') logging.info('Data Transformations: '+str(outputStr)) print('Data Transformations: '+str(outputStr)) logging.info("===== Transformation Container Done =====") outputStr = outputStr.strip() decoded_data = json.loads(outputStr) status = decoded_data['Status'] if status != 'Success': output = {'Status':'Error','Msg':'Data Transformations Fails'} logging.info("===== Feature Engineering Container Start =====") outputStr = client.containers.run(config['FeatureEngineering'],'python code.py',volumes=[persistancevolume+':/aion']) outputStr = outputStr.decode('utf-8') logging.info('FeatureEngineering: '+str(outputStr)) print('FeatureEngineering: '+str(outputStr)) logging.info("===== Feature Engineering Container Done =====") outputStr = outputStr.strip() decoded_data = json.loads(outputStr) status = decoded_data['Status'] modeltraining = config['ModelTraining'] for mt in modeltraining: logging.info("===== Training Container Start =====") outputStr = client.containers.run(mt['Training'],'python code.py',volumes=[persistancevolume+':/aion']) outputStr = outputStr.decode('utf-8') logging.info('ModelTraining: '+str(outputStr)) print('ModelTraining: '+str(outputStr)) logging.info("===== Training Container Done =====") outputStr = outputStr.strip() try: decoded_data = json.loads(outputStr) status = decoded_data['Status'] except Exception as inst: logging.info(inst) logging.info("===== Model Registry Start =====") outputStr = client.containers.run(config['ModelRegistry'],'python 
code.py',volumes=[persistancevolume+':/aion']) outputStr = outputStr.decode('utf-8') logging.info('ModelRegistry: '+str(outputStr)) print('ModelRegistry: '+str(outputStr)) logging.info("===== ModelRegistry Done =====") logging.info("===== ModelServing Start =====") outputStr = client.containers.run(config['ModelServing'],'python code.py',volumes=[persistancevolume+':/aion']) outputStr = outputStr.decode('utf-8') logging.info('Prediction: '+str(outputStr)) print('Prediction: '+str(outputStr)) logging.info("===== ModelServing Done =====")
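# Hedged sketch (not part of the module above): the shape of the JSON string that
# run_pipeline() expects. All paths and names are placeholders; 'dockerlist' should point at
# the dockerlist.json produced by build_container.py, and a local docker daemon must be running.
import json

pipeline_input = {
    "logfilepath": "/aion/logs/pipeline.log",
    "usecase": "demo_usecase",
    "version": "1",
    "persistancevolume": "/aion/persist",
    "datasetpath": "/aion/data/train.csv",
    "dockerlist": "/aion/mlaac/dockerlist.json",
}
# run_pipeline(json.dumps(pipeline_input))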
build_container.py
import os
import shutil
import sys
import subprocess
from os.path import expanduser
import platform
import json

def createDockerImage(model_name, model_version, module, folderpath):
    # pull the base image, then build the stage image from the folder's Dockerfile
    command = 'docker pull python:3.8-slim-buster'
    os.system(command)
    subprocess.check_call(["docker", "build", "-t", module+'_'+model_name.lower()+":"+model_version, "."], cwd=folderpath)

def local_docker_build(config):
    print(config)
    config = json.loads(config)
    model_name = config['usecase']
    model_version = config['version']
    mlaac__code_path = config['mlacPath']
    docker_images = {}
    # build one image per MLaC stage folder
    docker_images['ModelMonitoring'] = 'modelmonitoring'+'_'+model_name.lower()+':'+model_version
    dataset_addr = os.path.join(mlaac__code_path, 'ModelMonitoring')
    createDockerImage(model_name, model_version, 'modelmonitoring', dataset_addr)
    docker_images['DataIngestion'] = 'dataingestion'+'_'+model_name.lower()+':'+model_version
    dataset_addr = os.path.join(mlaac__code_path, 'DataIngestion')
    createDockerImage(model_name, model_version, 'dataingestion', dataset_addr)
    transformer_addr = os.path.join(mlaac__code_path, 'DataTransformation')
    docker_images['DataTransformation'] = 'datatransformation'+'_'+model_name.lower()+':'+model_version
    createDockerImage(model_name, model_version, 'datatransformation', transformer_addr)
    featureengineering_addr = os.path.join(mlaac__code_path, 'FeatureEngineering')
    docker_images['FeatureEngineering'] = 'featureengineering'+'_'+model_name.lower()+':'+model_version
    createDockerImage(model_name, model_version, 'featureengineering', featureengineering_addr)
    # one training image per ModelTraining* folder
    arr = [filename for filename in os.listdir(mlaac__code_path) if filename.startswith("ModelTraining")]
    docker_training_images = []
    for x in arr:
        dockertraing = {}
        dockertraing['Training'] = str(x).lower()+'_'+model_name.lower()+':'+model_version
        docker_training_images.append(dockertraing)
        training_addri = os.path.join(mlaac__code_path, x)
        createDockerImage(model_name, model_version, str(x).lower(), training_addri)
    docker_images['ModelTraining'] = docker_training_images
    docker_images['ModelRegistry'] = 'modelregistry'+'_'+model_name.lower()+':'+model_version
    deploy_addr = os.path.join(mlaac__code_path, 'ModelRegistry')
    createDockerImage(model_name, model_version, 'modelregistry', deploy_addr)
    docker_images['ModelServing'] = 'modelserving'+'_'+model_name.lower()+':'+model_version
    deploy_addr = os.path.join(mlaac__code_path, 'ModelServing')
    createDockerImage(model_name, model_version, 'modelserving', deploy_addr)
    # record the built image names for the local pipeline runner
    outputjsonFile = os.path.join(mlaac__code_path, 'dockerlist.json')
    with open(outputjsonFile, 'w') as f:
        json.dump(docker_images, f)
    output = {'Status': 'Success', 'Msg': outputjsonFile}
    output = json.dumps(output)
    print("aion_build_container:", output)
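# Hedged sketch (not part of the module above): the config string consumed by
# local_docker_build(). Values are placeholders; 'mlacPath' must contain the generated MLaC
# folders (ModelMonitoring, DataIngestion, ..., ModelServing), each with a Dockerfile.
import json

build_config = {
    "usecase": "demo_usecase",
    "version": "1",
    "mlacPath": "/aion/mlaac/demo_usecase_1",
}
# local_docker_build(json.dumps(build_config))   # requires docker on PATH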
git_upload.py
import os import sys import json from pathlib import Path import subprocess import shutil import argparse def create_and_save_yaml(git_storage_path, container_label,usecasepath): file_name_prefix = 'gh-acr-' yaml_file = f"""\ name: gh-acr-{container_label} on: push: branches: main paths: {container_label}/** workflow_dispatch: jobs: gh-acr-build-push: runs-on: ubuntu-latest steps: - name: 'checkout action' uses: actions/checkout@main - name: 'azure login' uses: azure/login@v1 with: creds: ${{{{ secrets.AZURE_CREDENTIALS }}}} - name: 'build and push image' uses: azure/docker-login@v1 with: login-server: ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}} username: ${{{{ secrets.REGISTRY_USERNAME }}}} password: ${{{{ secrets.REGISTRY_PASSWORD }}}} - run: | docker build ./{container_label}/ModelMonitoring -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelmonitoring:{container_label} docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelmonitoring:{container_label} docker build ./{container_label}/DataIngestion -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/dataingestion:{container_label} docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/dataingestion:{container_label} docker build ./{container_label}/DataTransformation -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/datatransformation:{container_label} docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/datatransformation:{container_label} docker build ./{container_label}/FeatureEngineering -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/featureengineering:{container_label} docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/featureengineering:{container_label} docker build ./{container_label}/ModelRegistry -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelregistry:{container_label} docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelregistry:{container_label} docker build ./{container_label}/ModelServing -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelserving:{container_label} docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelserving:{container_label} """ arr = [filename for filename in os.listdir(usecasepath) if filename.startswith("ModelTraining")] for x in arr: yaml_file+=' docker build ./'+container_label+'/'+x+' -t ${{ secrets.REGISTRY_LOGIN_SERVER }}/'+x.lower()+':'+container_label yaml_file+='\n' yaml_file+=' docker push ${{ secrets.REGISTRY_LOGIN_SERVER }}/'+x.lower()+':'+container_label yaml_file+='\n' with open(Path(git_storage_path)/(file_name_prefix + container_label + '.yaml'), 'w') as f: f.write(yaml_file) def run_cmd(cmd): try: subprocess.check_output(cmd, stderr=subprocess.PIPE) except subprocess.CalledProcessError as e: if e.stderr: if isinstance(e.stderr, bytes): err_msg = e.stderr.decode(sys.getfilesystemencoding()) else: err_msg = e.stderr elif e.output: if isinstance(e.output, bytes): err_msg = e.output.decode(sys.getfilesystemencoding()) else: err_msg = e.output else: err_msg = str(e) return False, err_msg return True, "" def validate_config(config): non_null_keys = ['url','username', 'token', 'location', 'gitFolderLocation', 'email', 'modelName'] missing_keys = [k for k in non_null_keys if k not in config.keys()] if missing_keys: raise ValueError(f"following fields are missing in config file: {missing_keys}") for k,v in config.items(): if k in non_null_keys and not v: raise ValueError(f"Please provide value for '{k}' in config file.") def upload(config): validate_config(config) url_type = config.get('url_type','https') if url_type == 'https': https_str = "https://" url = https_str + config['username'] + ":" + 
config['token'] + "@" + config['url'][len(https_str):] else: url = config['url'] model_location = Path(config['location']) git_folder_location = Path(config['gitFolderLocation']) git_folder_location.mkdir(parents=True, exist_ok=True) (git_folder_location/'.github'/'workflows').mkdir(parents=True, exist_ok=True) if not model_location.exists(): raise ValueError('Trained model data not found') os.chdir(str(git_folder_location)) (git_folder_location/config['modelName']).mkdir(parents=True, exist_ok=True) shutil.copytree(model_location, git_folder_location/config['modelName'], dirs_exist_ok=True) create_and_save_yaml((git_folder_location/'.github'/'workflows'), config['modelName'],config['location']) if (Path(git_folder_location)/'.git').exists(): first_upload = False else: first_upload = True if first_upload: cmd = ['git','init'] status, msg = run_cmd(cmd) if not status: raise ValueError(msg) cmd = ['git','config','user.name',config['username']] status, msg = run_cmd(cmd) if not status: raise ValueError(msg) cmd = ['git','config','user.email',config['email']] status, msg = run_cmd(cmd) if not status: raise ValueError(msg) cmd = ['git','add', '-A'] status, msg = run_cmd(cmd) if not status: raise ValueError(msg) cmd = ['git','commit','-m',f"commit {config['modelName']}"] status, msg = run_cmd(cmd) if not status: raise ValueError(msg) cmd = ['git','branch','-M','main'] status, msg = run_cmd(cmd) if not status: raise ValueError(msg) if first_upload: cmd = ['git','remote','add','origin', url] status, msg = run_cmd(cmd) if not status: raise ValueError(msg) cmd = ['git','push','-f','-u','origin', 'main'] status, msg = run_cmd(cmd) if not status: raise ValueError(msg) else: cmd = ['git','push'] status, msg = run_cmd(cmd) if not status: raise ValueError(msg) return json.dumps({'Status':'SUCCESS'}) if __name__ == '__main__': try: if shutil.which('git') is None: raise ValueError("git is not installed on this system") parser = argparse.ArgumentParser() parser.add_argument('-c', '--config', help='Config file location or as a string') args = parser.parse_args() if Path(args.config).is_file() and Path(args.config).suffix == '.json': with open(args.config,'r') as f: config = json.load(f) else: config = json.loads(args.config) print(upload(config)) except Exception as e: status = {'Status':'Failure','msg':str(e)} print(json.dumps(status))
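# Hedged sketch (not part of the module above): a config accepted by upload() and by the
# command-line entry point. Every value is a placeholder; 'token' is a personal access token
# with push rights on the target repository.
upload_config = {
    "url": "https://github.com/example-org/example-repo.git",
    "url_type": "https",
    "username": "example-user",
    "token": "<personal-access-token>",
    "email": "user@example.com",
    "location": "/aion/mlaac/demo_usecase_1",   # trained MLaC code to publish
    "gitFolderLocation": "/aion/git_staging",   # local working copy used for the push
    "modelName": "demo_usecase_1",
}
# print(upload(upload_config))
# or: python git_upload.py -c config.json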
__init__.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
kafka_consumer.py
from kafka import KafkaConsumer from json import loads import pandas as pd import json import os,sys import time import multiprocessing from os.path import expanduser import platform import datetime modelDetails = {} class Process(multiprocessing.Process): def __init__(self, modelSignature,jsonData,predictedData,modelpath): super(Process, self).__init__() self.config = jsonData self.modelSignature = modelSignature self.data = predictedData self.modelpath = modelpath def run(self): #data = pd.json_normalize(self.data) minotoringService = self.config['minotoringService']['url'] trainingdatalocation = self.config['trainingDataLocation'][self.modelSignature] #filetimestamp = 'AION_'+str(int(time.time()))+'.csv' #data.to_csv(dataFile, index=False) inputFieldsJson = {"trainingDataLocation":trainingdatalocation,"currentDataLocation":self.data} inputFieldsJson = json.dumps(inputFieldsJson) ser_url = minotoringService+self.modelSignature+'/monitoring' driftTime = datetime.datetime.now() import requests try: response = requests.post(ser_url, data=inputFieldsJson,headers={"Content-Type":"application/json",}) outputStr=response.content outputStr = outputStr.decode('utf-8') outputStr = outputStr.strip() decoded_data = json.loads(outputStr) print(decoded_data) status = decoded_data['status'] msg = decoded_data['data'] except Exception as inst: if 'Failed to establish a new connection' in str(inst): status = 'Fail' msg = 'AION Service needs to be started' else: status = 'Fail' msg = 'Error during Drift Analysis' statusFile = os.path.join(self.modelpath,self.modelSignature+'_status.csv') df = pd.DataFrame(columns = ['dateTime', 'status', 'msg']) df = df.append({'dateTime' : driftTime, 'status' : status, 'msg' : msg},ignore_index = True) print(df) if (os.path.exists(statusFile)): df.to_csv(statusFile, mode='a', header=False,index=False) else: df.to_csv(statusFile, header=True,index=False) def launch_kafka_consumer(): from appbe.dataPath import DATA_DIR configfile = os.path.join(os.path.dirname(__file__),'..','config','kafkaConfig.conf') with open(configfile,'r',encoding='utf-8') as f: jsonData = json.load(f) f.close() kafkaIP=jsonData['kafkaCluster']['ip'] kafkaport = jsonData['kafkaCluster']['port'] topic = jsonData['kafkaCluster']['topic'] kafkaurl = kafkaIP+':'+kafkaport if jsonData['database']['csv'] == 'True': database = 'csv' elif jsonData['database']['mySql'] == 'True': database = 'mySql' else: database = 'csv' kafkaPath = os.path.join(DATA_DIR,'kafka') if not (os.path.exists(kafkaPath)): try: os.makedirs(kafkaPath) except OSError as e: pass consumer = KafkaConsumer(topic,bootstrap_servers=[kafkaurl],auto_offset_reset='earliest',enable_auto_commit=True,group_id='my-group',value_deserializer=lambda x: loads(x.decode('utf-8'))) for message in consumer: message = message.value data = message['data'] data = pd.json_normalize(data) modelname = message['usecasename'] version = message['version'] modelSignature = modelname+'_'+str(version) modelpath = os.path.join(kafkaPath,modelSignature) try: os.makedirs(modelpath) except OSError as e: pass secondsSinceEpoch = time.time() if modelSignature not in modelDetails: modelDetails[modelSignature] = {} modelDetails[modelSignature]['startTime'] = secondsSinceEpoch if database == 'csv': csvfile = os.path.join(modelpath,modelSignature+'.csv') if (os.path.exists(csvfile)): data.to_csv(csvfile, mode='a', header=False,index=False) else: data.to_csv(csvfile, header=True,index=False) modelTimeFrame = jsonData['timeFrame'][modelSignature] currentseconds = time.time() 
print(currentseconds - modelDetails[modelSignature]['startTime']) if (currentseconds - modelDetails[modelSignature]['startTime']) >= float(modelTimeFrame): csv_path = os.path.join(modelpath,modelSignature+'.csv') #predictedData = pd.read_csv(csv_path) ##predictedData = predictedData.to_json(orient="records") index = Process(modelSignature,jsonData,csv_path,modelpath) index.start() modelDetails[modelSignature]['startTime'] = secondsSinceEpoch
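# Hedged sketch (not part of the module above): publishing a record in the shape this
# consumer expects. The broker address and topic are placeholders and must match the
# 'kafkaCluster' section of kafkaConfig.conf read above.
import json
from kafka import KafkaProducer

producer = KafkaProducer(
    bootstrap_servers=['localhost:9092'],                      # placeholder broker
    value_serializer=lambda v: json.dumps(v).encode('utf-8'),
)
message = {
    "usecasename": "demo_usecase",
    "version": "1",
    "data": [{"feature1": 1.2, "feature2": "A"}],   # rows appended to the per-model CSV
}
producer.send("aion_topic", value=message)          # topic name is an assumption
producer.flush()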
pattern.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import pandas as pd import numpy as np import sys import math import markov_clustering as mc import os import networkx as nx import logging import json ## How far you'd like your random-walkers to go (bigger number -> more walking) EXPANSION_POWER = 2 ## How tightly clustered you'd like your final picture to be (bigger number -> more clusters) INFLATION_POWER = 2 ## If you can manage 100 iterations then do so - otherwise, check you've hit a stable end-point. ITERATION_COUNT = 100 def normalize(matrix): return matrix/np.sum(matrix, axis=0) def expand(matrix, power): return np.linalg.matrix_power(matrix, power) def inflate(matrix, power): for entry in np.nditer(matrix, op_flags=['readwrite']): entry[...] = math.pow(entry, power) return matrix class pattern: def __init__(self,modelFeatures,targetFeature): self.modelFeatures = modelFeatures.split(',') self.targetFeature = targetFeature self.log = logging.getLogger('eion') def training(self,df,outputLocation): df["code"] = df[self.targetFeature].astype("category") df['code'] = df.code.cat.codes df2 = df[[self.targetFeature,'code']] df2 = df2.drop_duplicates() code_book = df2.to_dict('records') size = len(code_book) if self.targetFeature in self.modelFeatures: self.modelFeatures.remove(self.targetFeature) df['prev_code'] = df.groupby(self.modelFeatures)['code'].shift() df['prev_activity'] = df.groupby(self.modelFeatures)[self.targetFeature].shift() print(self.modelFeatures) df = df.dropna(axis=0, subset=['prev_code']) df['prev_code'] = df['prev_code'].astype('int32') matrix = np.zeros((size, size),float) np.set_printoptions(suppress=True) for index, row in df.iterrows(): matrix[int(row['prev_code'])][int(row['code'])] += 1 np.fill_diagonal(matrix, 1) matrix = normalize(matrix) pmatrix = matrix i = 0 records = [] for row in matrix: j = 0 for val in row: for event in code_book: if event['code'] == i: page = event[self.targetFeature] if event['code'] == j: nextpage = event[self.targetFeature] record = {} record['State'] = page record['NextState'] = nextpage record['Probability'] = round(val,2) records.append(record) j = j+1 i = i+1 df_probability = pd.DataFrame(records) self.log.info('Status:- |... StateTransition Probability Matrix') for _ in range(ITERATION_COUNT): matrix = normalize(inflate(expand(matrix, EXPANSION_POWER), INFLATION_POWER)) result = mc.run_mcl(matrix) # run MCL with default parameters c = 0 clusters = mc.get_clusters(matrix) # get clusters self.log.info('Status:- |... 
StateTransition Algorithm applied: MarkovClustering') clusterrecords = [] for cluster in clusters: clusterid = c clusterlist = '' for pageid in cluster: for event in code_book: if event['code'] == pageid: page = event[self.targetFeature] if clusterlist != '': clusterlist = clusterlist+',' clusterlist = clusterlist+page record = {} record['clusterid'] = c record['clusterlist'] = clusterlist clusterrecords.append(record) c = c+1 df_cluster = pd.DataFrame(clusterrecords) probabilityoutputfile = os.path.join(outputLocation, 'stateTransitionProbability.csv') self.log.info('-------> State Transition Probability Matrix:' + probabilityoutputfile) df_probability.to_csv(probabilityoutputfile,index=False) clusteringoutputfile = os.path.join(outputLocation, 'stateClustering.csv') self.log.info('-------> State Transition Probability Grouping:' + clusteringoutputfile) df_cluster.to_csv(clusteringoutputfile,index=False) datadetailsfile = os.path.join(outputLocation, 'datadetails.json') dataanalytics = {} dataanalytics['activity'] = self.targetFeature dataanalytics['sessionid'] = self.modelFeatures[0] updatedConfig = json.dumps(dataanalytics) with open(datadetailsfile, "w") as fpWrite: fpWrite.write(updatedConfig) fpWrite.close() evaulatemodel = '{"Model":"MarkovClustering","Score":0}' return(evaulatemodel,probabilityoutputfile,clusteringoutputfile)
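# Hedged usage sketch (not part of the module above): running the state-transition trainer
# on a tiny clickstream-style frame. Column names and the output folder are placeholders;
# markov_clustering and networkx must be installed.
import pandas as pd

df = pd.DataFrame({
    "sessionid": [1, 1, 1, 2, 2, 2],
    "page": ["home", "search", "cart", "home", "cart", "checkout"],
})
p = pattern(modelFeatures="sessionid", targetFeature="page")
# score_json, probability_csv, cluster_csv = p.training(df, outputLocation="/tmp/pattern_out")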
__init__.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
runtime_utility.py
class aionRunTimeUtility: # def __init__(self): # print("AI.ON ConversionUtility function init...") def executeOnRuntime(self,inputModelName,inputDataSet): # print("AI.ON ConversionUtility function starts...") RuntimeType = inputModelName.rsplit('.', 1)[1] inputDataType = inputDataSet.rsplit('.', 1)[1] if((RuntimeType == 'ONNX' or RuntimeType == 'onnx') and (inputDataType.lower()=='json')): # print("Inference through ONNX Runtime started [ML]") import pandas import json with open(inputDataSet) as datafile: data = json.load(datafile) dataframe = pandas.DataFrame(data,index=[0]) import numpy import onnxruntime as rt sess = rt.InferenceSession(inputModelName) input_name = sess.get_inputs()[0].name label_name = sess.get_outputs()[0].name inputsize=sess.get_inputs()[0].shape first_n_column = dataframe.iloc[: , :inputsize[1]] dataset = first_n_column.values if(inputsize[1]!=len(dataframe.columns)): print("Error : Input Data size does not match") return 0 pred_onx = sess.run([label_name], {input_name: dataset.astype(numpy.float32)[0:1]})[0] # for i in range(0, 1): #print("ONNX Runtime Prediction [csv]: ",pred_onx) output = numpy.squeeze(pred_onx) predictions = numpy.squeeze(output) prediction = numpy.argmax(predictions) return(prediction) # print("Inference through ONNX modelcompleted ") if((RuntimeType == 'ONNX' or RuntimeType == 'onnx') and (inputDataType!='json')): import numpy as np import onnxruntime as rt from tensorflow.keras.preprocessing import image sess = rt.InferenceSession(inputModelName) input_name = sess.get_inputs()[0].name label_name = sess.get_outputs()[0].name inputsize=sess.get_inputs()[0].shape img = image.load_img(inputDataSet, target_size=(inputsize[1], inputsize[2])) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) pred_onx = sess.run([label_name], {input_name: x.astype(np.float32)[0:1]})[0] output = np.squeeze(pred_onx) predictions = np.squeeze(output) return(pred_onx) if((RuntimeType == 'TFLITE' or RuntimeType == 'tflite')and (inputDataType=='json')): import numpy as np import tensorflow as tf import pandas from numpy import asarray interpreter = tf.lite.Interpreter(model_path=inputModelName) interpreter.allocate_tensors() input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() input_shape = input_details[0]['shape'] import pandas import json with open(inputDataSet) as datafile: data = json.load(datafile) dataframe = pandas.DataFrame(data,index=[0]) dataset = dataframe.values XYZ = dataset[:,0:input_shape[1]].astype(float) input_data = asarray(XYZ[0]).reshape((1, input_shape[1])) for i in range(0, 1): input_data = asarray(XYZ[i]).reshape((1,input_shape[1])) interpreter.set_tensor(input_details[0]['index'], input_data.astype(np.float32)[0:1]) interpreter.invoke() output_data = interpreter.get_tensor(output_details[0]['index']) predictions = np.squeeze(output_data) prediction = np.argmax(predictions) return(prediction) if((RuntimeType == 'TFLITE' or RuntimeType == 'tflite') and (inputDataType!='json')): import numpy as np from tensorflow.keras.preprocessing import image import os import tensorflow as tf import pandas from numpy import asarray interpreter = tf.lite.Interpreter(model_path=inputModelName) interpreter.allocate_tensors() input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() input_shape = input_details[0]['shape'] img = image.load_img(inputDataSet, target_size=(input_shape[1], input_shape[2])) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) 
interpreter.set_tensor(input_details[0]['index'], x.astype(np.float32)[0:1]) interpreter.invoke() output_data = interpreter.get_tensor(output_details[0]['index']) predictions = np.squeeze(output_data) prediction = np.argmax(predictions) return(prediction) def runTimeTesting(inputModelName,inputDataSet): objRunTimeUtility=aionRunTimeUtility() return(objRunTimeUtility.executeOnRuntime(inputModelName,inputDataSet))
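# Hedged usage sketch (not part of the module above): scoring a tabular ONNX model with a
# JSON record. File names are placeholders; the JSON object must provide at least as many
# numeric fields as the model's input width, and onnxruntime must be installed.
prediction = aionRunTimeUtility().executeOnRuntime("model.onnx", "sample_input.json")
print(prediction)
# runTimeTesting("model.onnx", "sample_input.json") wraps the same call as a module-level helper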
model_convertions.py
import os
import sys
import logging
import json
import joblib
from pathlib import Path
import platform
from datetime import datetime as dt
import time
import argparse

log = None


def get_true_option(d, default_value=None):
    # return the first key whose value is the string 'true' (case-insensitive) or boolean True
    if isinstance(d, dict):
        for k, v in d.items():
            if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
                return k
    return default_value

def convert_keras2onnx(input_model, output_file):
    import tensorflow as tf
    import tf2onnx
    from tensorflow.keras.models import load_model
    model = load_model(input_model)
    config = model.get_config()
    modelInputShape = config["layers"][0]["config"]["batch_input_shape"]
    spec = (tf.TensorSpec(modelInputShape, tf.float32, name="input"),)
    model_proto, _ = tf2onnx.convert.from_keras(model, input_signature=spec, opset=13, output_path=output_file)

def convert_sklearn2onnx(input_model, file_path, input_shape=1):
    from skl2onnx import convert_sklearn
    from skl2onnx.common.data_types import FloatTensorType
    sklearn_model = joblib.load(input_model)
    sklearn_model_name = str(type(sklearn_model)).split(".")[-1][:-2]
    initial_type = [('float_input', FloatTensorType([None, input_shape]))]
    model = convert_sklearn(sklearn_model, initial_types=initial_type)
    with open(file_path, "wb") as f:
        f.write(model.SerializeToString())

def convert_xgboost2onnx(input_model, file_path, input_shape=1):
    from onnxmltools.convert import convert_xgboost
    from onnxmltools.convert.common.data_types import FloatTensorType
    xgBoost_model = joblib.load(input_model)
    if not xgBoost_model.n_estimators:
        xgBoost_model.n_estimators = xgBoost_model.get_num_boosting_rounds()
    n_features = xgBoost_model.n_features_in_
    xgBoost_model.get_booster().feature_names = [f'f{x}' for x in range(n_features)]
    initial_type = [('float_input', FloatTensorType([None, xgBoost_model.n_features_in_]))]
    model = convert_xgboost(xgBoost_model, initial_types=initial_type)
    with open(file_path, "wb") as f:
        f.write(model.SerializeToString())

def convert_lightgbm2onnx(input_model, file_path):
    from onnxmltools.convert import convert_lightgbm
    from onnxmltools.convert.common.data_types import FloatTensorType
    lightGBM_model = joblib.load(input_model)
    initial_type = [('float_input', FloatTensorType([None, lightGBM_model.n_features_in_]))]
    model = convert_lightgbm(lightGBM_model, initial_types=initial_type, zipmap=False)
    with open(file_path, "wb") as f:
        f.write(model.SerializeToString())

def convert_coreml2onnx(input_model, file_path):
    import coremltools
    import onnxmltools
    coreml_model = coremltools.utils.load_spec(input_model)
    onnx_model = onnxmltools.convert_coreml(coreml_model)
    onnxmltools.utils.save_model(onnx_model, file_path)

def convert_tflite2onnx(input_model, file_path):
    cmd = f"{sys.executable} -m tf2onnx.convert --opset 13 --tflite {str(input_model)} --output {str(file_path)}"
    os.system(cmd)

def convert_tensorflow2onnx(input_model, file_path):
    import subprocess
    cmd = [sys.executable, '-m', 'tf2onnx.convert', '--saved-model', str(input_model), '--output', str(file_path)]
    result = subprocess.check_output(cmd)
    result = result.decode('utf-8')

def convert_libsvm2onnx(input_model, file_path):
    import onnxmltools
    import libsvm.svmutil as svmutil
    from onnxmltools.convert.libsvm import convert
    from onnxmltools.convert.common.data_types import FloatTensorType
    loaded_model = svmutil.svm_load_model(str(input_model))
    model = convert(loaded_model, "node", [('input', FloatTensorType())])
    onnxmltools.utils.save_model(model, file_path)

def optimize_onnx(onnx_model_file, output_file_path):
    # dynamic quantization of the ONNX model to unsigned 8-bit weights
    from onnxruntime.quantization import quantize_dynamic, QuantType
    quantize_dynamic(onnx_model_file, output_file_path, weight_type=QuantType.QUInt8)
    return True

def convert_keras2tflite(input_model, file_path, optimized=False):
    import tensorflow as tf
    converter = tf.compat.v1.lite.TFLiteConverter.from_keras_model_file(input_model)
    if optimized:
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_types = [tf.float16]
    model = converter.convert()
    with open(file_path, 'wb') as f:
        f.write(model)

def convert_tensorflow2tflite(input_model, file_path, optimized=False):
    import tensorflow as tf
    modelpath = str(input_model)
    #converter = tf.compat.v1.lite.TFLiteConverter.from_saved_model(input_model)
    converter = tf.compat.v1.lite.TFLiteConverter.from_saved_model(modelpath)
    if optimized:
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_types = [tf.float16]
    model = converter.convert()
    with open(file_path, 'wb') as f:
        f.write(model)

class model_converter():

    def __init__(self, model_path, output_path, input_format, output_format, shape=None):
        if not self.is_conversion_supported(input_format, output_format):
            raise ValueError(f"{input_format} to {output_format} is not supported")
        if not Path(model_path).exists():
            raise ValueError(f"Model doesn't exist at: {model_path}")
        self.model_path = Path(model_path)
        self.output_path = Path(output_path)
        self.output_path.mkdir(parents=True, exist_ok=True)
        self.input_format = input_format
        self.output_format = output_format
        self.shape = shape

    def is_conversion_supported(self, model_format, output_format):
        # map of supported output formats per input model format
        onnx_formats = ['onnx_standard', 'onnx_optimized']
        tflite_formats = ['tflite_standard', 'tflite_optimized']
        sagemaker_formats = ['sagemaker']
        all_formats = onnx_formats + tflite_formats + sagemaker_formats
        formats = {'sklearn': onnx_formats + sagemaker_formats,
                   'keras': onnx_formats + tflite_formats,
                   'tensorflow': onnx_formats + tflite_formats,
                   'tflite': onnx_formats,
                   'lightgbm': onnx_formats,
                   'xgboost': onnx_formats,
                   'libsvm': onnx_formats,
                   'coreml': ['onnx_standard']}
        if model_format in list(formats.keys()) and output_format in all_formats:
            if output_format in formats[model_format]:
                return True
        return False

    def convert(self):
        if self.output_format == 'onnx_standard':
            output_file = self.output_path / (self.model_path.stem + '.onnx')
            if self.input_format == 'sklearn':
                model = convert_sklearn2onnx(self.model_path, output_file, self.shape)
            elif self.input_format == 'keras':
                convert_keras2onnx(self.model_path, output_file)
            elif self.input_format == 'lightgbm':
                convert_lightgbm2onnx(self.model_path, output_file)
            elif self.input_format == 'xgboost':
                convert_xgboost2onnx(self.model_path, output_file)
            elif self.input_format == 'libsvm':
                convert_libsvm2onnx(self.model_path, output_file)
            elif self.input_format == 'coreml':
                convert_coreml2onnx(self.model_path, output_file)
            elif self.input_format == 'tflite':
                convert_tflite2onnx(self.model_path, output_file)
            elif self.input_format == 'tensorflow':
                convert_tensorflow2onnx(self.model_path, output_file)
        elif self.output_format == 'onnx_optimized':
            # convert to a temporary un-quantized ONNX file, then quantize it
            onnx_std_file = self.output_path / (self.model_path.stem + '_unquant.onnx')
            if onnx_std_file.exists():
                onnx_std_file.unlink()
            output_file = self.output_path / (self.model_path.stem + 'Opt.onnx')
            if self.input_format == 'sklearn':
                convert_sklearn2onnx(self.model_path, onnx_std_file, self.shape)
            elif self.input_format == 'keras':
                convert_keras2onnx(self.model_path, onnx_std_file)
            elif self.input_format == 'lightgbm':
                convert_lightgbm2onnx(self.model_path, onnx_std_file)
            elif self.input_format == 'xgboost':
                convert_xgboost2onnx(self.model_path, onnx_std_file)
            elif self.input_format == 'libsvm':
                convert_libsvm2onnx(self.model_path, onnx_std_file)
            elif self.input_format == 'tflite':
                convert_tflite2onnx(self.model_path, onnx_std_file)
            elif self.input_format == 'tensorflow':
                convert_tensorflow2onnx(self.model_path, onnx_std_file)
            if onnx_std_file.exists():
                try:
                    optimize_onnx(onnx_std_file, output_file)
                except Exception as e:
                    raise
                finally:
                    # always clean up the intermediate files
                    onnx_std_file.unlink()
                    temp_file = onnx_std_file.parent / (onnx_std_file.stem + '-opt.onnx')
                    if temp_file.exists():
                        temp_file.unlink()
        elif self.output_format in ['tflite_standard', 'tflite_optimized']:
            if self.output_format == 'tflite_optimized':
                output_file = self.output_path / (self.model_path.stem + 'Opt.tflite')
                optimized = True
            else:
                output_file = self.output_path / (self.model_path.stem + '.tflite')
                optimized = False
            if self.input_format == 'keras':
                convert_keras2tflite(self.model_path, output_file, optimized)
            elif self.input_format == 'tensorflow':
                convert_tensorflow2tflite(self.model_path, output_file, optimized)

def run(model_path, output_path, input_format, output_format, input_shape=None):
    from appbe.dataPath import LOG_LOCATION
    input_format = input_format.lower()
    output_format = output_format.lower()
    log_file_path = Path(LOG_LOCATION)
    log_file_path.mkdir(parents=True, exist_ok=True)
    time_stamp = dt.fromtimestamp(time.time()).strftime('%Y-%m-%d-%H-%M-%S')
    fileName = 'modelConversion_' + time_stamp + '.log'
    filehandler = logging.FileHandler(log_file_path / fileName, 'w', 'utf-8')
    formatter = logging.Formatter('%(message)s')
    filehandler.setFormatter(formatter)
    log = logging.getLogger('modelConversionUtility')
    log.propagate = False
    for hdlr in log.handlers[:]:  # remove the existing file handlers
        if isinstance(hdlr, logging.FileHandler):
            log.removeHandler(hdlr)
    log.addHandler(filehandler)
    log.setLevel(logging.INFO)
    log.info('------------------ModelConversionUtility---------------------')
    log.info(f'Input model path: {model_path}')
    log.info(f'Output model path: {output_path}')
    log.info(f'Input model format: {input_format}')
    log.info(f'Output model format: {output_format}')
    log.info(f'\nConverting {input_format} to {output_format} start:')
    output = {}
    output['logfiles'] = str(log_file_path / fileName)
    log.info(f"\nExecution Start Time: {dt.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')}")
    try:
        start_time = time.time()
        objConvUtility = model_converter(model_path, output_path, input_format, output_format, input_shape)
        objConvUtility.convert()
        end_time = time.time()
        log.info(f"Time required for conversion: {end_time - start_time} sec")
        log.info(f'\nConverting {input_format} to {output_format} Successful')
        output['Convert'] = "Success"
    except Exception as e:
        output['Convert'] = "Error"
        log.info('Error: ' + str(e))
        log.error(e, exc_info=True)
        if 'not supported' in str(e):
            output['sub error'] = "Not supported"
    output = json.dumps(output)
    log.info(f'Output: {output}')
    return output

def convert(config_file):
    with open(config_file, 'r') as f:
        config = json.load(f)
    model_path = config['advance']['aionConversionUtility']['modelpath']
    output_path = config['advance']['aionConversionUtility']['deployedlocation']
    input_format = get_true_option(config['advance']['aionConversionUtility']['inputModelType'], '').lower()
    output_format = get_true_option(config['advance']['aionConversionUtility']['outputModelType'], '').lower()
    if input_format == "keras":
        input_shape = int(config['advance']['aionConversionUtility']['inputShape'])
    if input_format != "keras":
        input_shape = config['advance']['aionConversionUtility']['numberoffeatures']
        input_shape = int(input_shape) if input_shape else 0
        #input_shape = int(config['advance']['aionConversionUtility']['numberoffeatures'])
    output = run(model_path, output_path, input_format, output_format, input_shape)
    print(output)
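The converter class can also be driven without the run() wrapper, which is handy when the appbe.dataPath.LOG_LOCATION import is not available outside the AION deployment. The following is a minimal usage sketch, not part of the utility itself; the model path, output folder, and feature count are hypothetical, and a joblib-dumped scikit-learn estimator is assumed to exist at the given path.

# illustrative only: paths and feature count below are assumptions
from pathlib import Path

sklearn_pkl = 'models/iris_rf.pkl'        # hypothetical joblib-dumped sklearn model
out_dir = 'models/converted'              # hypothetical output folder

converter = model_converter(sklearn_pkl, out_dir,
                            input_format='sklearn',
                            output_format='onnx_standard',
                            shape=4)      # number of input features of the estimator
converter.convert()
print(list(Path(out_dir).glob('*.onnx')))  # expect models/converted/iris_rf.onnx

Choosing output_format='onnx_optimized' instead would additionally run the dynamic-quantization step and emit an *Opt.onnx file.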
run_onnxinference.py
import pandas
import numpy
import sys
import onnxruntime as rt

def onnx_runtime_validation(modelfile, datafile):
    # score the first 8 rows of the CSV against the ONNX model and return them as JSON
    dataframe = pandas.read_csv(datafile)
    df = dataframe.head(8).copy()
    dataset = df.values
    sess = rt.InferenceSession(modelfile)
    input_name = sess.get_inputs()[0].name
    label_name = sess.get_outputs()[0].name
    inputsize = sess.get_inputs()[0].shape
    XYZ = dataset[:, 0:inputsize[1]].astype(float)
    pred_onx = sess.run([label_name], {input_name: XYZ.astype(numpy.float32)[0:8]})[0]
    print("Prediction of AION generated/converted model on ONNX runtime for 8 sets of data")
    predicted = []
    for i in range(0, 8):
        output = numpy.squeeze(pred_onx[i])
        predictions = numpy.squeeze(output)
        # record the predicted class (index of the highest score) for each row
        predicted.append(numpy.argmax(predictions))
    df['predictions'] = predicted
    result = df.to_json(orient="records")
    return (result)

if __name__ == "__main__":
    output = onnx_runtime_validation(sys.argv[1], sys.argv[2])
    print("predictions:", output)
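As a quick sanity check, the helper above can also be called from Python directly. The sketch below reuses the hypothetical ONNX file from the earlier conversion example and assumes a CSV whose leading columns match the model's input width; the same effect is available from the shell via python run_onnxinference.py <model.onnx> <data.csv>.

# hypothetical paths; the CSV's first N columns must line up with the ONNX input shape
result_json = onnx_runtime_validation('models/converted/iris_rf.onnx', 'data/iris_test.csv')
print(result_json)   # first 8 rows of the CSV plus a 'predictions' column, as JSON records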
__init__.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
__init__.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
aionNAS.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import logging
logging.getLogger('tensorflow').disabled = True
import json
#from nltk.corpus import stopwords
from collections import Counter
from numpy import mean
from numpy import std
from pandas import read_csv
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from learner.machinelearning import machinelearning
# from sklearn.dummy import DummyClassifier
# create histograms of numeric input variables
import sys
import os
import re
import pandas as pd
import numpy as np
from learner.aion_matrix import aion_matrix
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import autokeras as ak
# load the sonar dataset
from sklearn.model_selection import train_test_split
# from sklearn.metrics import cohen_kappa_score
# from sklearn.metrics import roc_auc_score
# from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve
from math import sqrt
from sklearn.metrics import mean_squared_error, explained_variance_score, mean_absolute_error
from sklearn import metrics

class aionNAS:

    def __init__(self, nas_class, nas_params, xtrain1, xtest1, ytrain1, ytest1, deployLocation):
        try:
            self.dfFeatures = None
            self.nas_class = nas_class
            self.nas_params = nas_params
            self.targetFeature = None
            self.log = logging.getLogger('eion')
            self.n_models = int(self.nas_params['n_models'])
            self.n_epochs = int(self.nas_params['n_epochs'])
            self.optimizer = self.nas_params['optimizer']
            self.metrics = self.nas_params['metrics']
            self.tuner = self.nas_params['tuner']
            self.seed = int(self.nas_params['seed'])
            self.xtrain = xtrain1
            self.xtest = xtest1
            self.ytrain = ytrain1
            self.ytest = ytest1
            #self.labelMaps = labelMaps
            self.deployLocation = deployLocation
        except Exception as e:
            self.log.info('<!------------- NAS INIT Error ---------------> ')
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))

    def paramCheck(self):
        # validate the NAS configuration and fall back to sensible defaults
        try:
            if not (self.nas_class):
                self.log.info('<!------------- NAS class input Error ---------------> ')
            if not (self.nas_params):
                self.log.info('<!------------- NAS model hyperparameter input Error ---------------> ')
            if not (self.targetFeature):
                self.log.info('<!------------- NAS model targetFeature input Error ---------------> ')
            if (self.n_models < 1):
                self.n_models = 1
            if not (self.dfFeatures):
                self.log.info('<!------------- NAS model features Error ---------------> ')
            if (self.n_epochs < 1):
                self.n_epochs = 1
            if not (self.optimizer):
                self.optimizer = "adam"
            if not (self.tuner):
                self.tuner = "greedy"
            if (self.seed < 1):
                self.seed = 0
            if not (self.metrics):
                self.metrics = None
        except ValueError:
            self.log.info('<------------------ NAS config file error. --------------->')

    # custom Keras metrics (recall, precision, f1) built from backend operations
    def recall_m(self, y_true, y_pred):
        true_positives = tf.keras.backend.sum(tf.keras.backend.round(tf.keras.backend.clip(y_true * y_pred, 0, 1)))
        possible_positives = tf.keras.backend.sum(tf.keras.backend.round(tf.keras.backend.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + tf.keras.backend.epsilon())
        return recall

    def precision_m(self, y_true, y_pred):
        true_positives = tf.keras.backend.sum(tf.keras.backend.round(tf.keras.backend.clip(y_true * y_pred, 0, 1)))
        predicted_positives = tf.keras.backend.sum(tf.keras.backend.round(tf.keras.backend.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + tf.keras.backend.epsilon())
        return precision

    def f1_score(self, y_true, y_pred):
        precision = self.precision_m(y_true, y_pred)
        recall = self.recall_m(y_true, y_pred)
        return 2*((precision*recall)/(precision+recall+tf.keras.backend.epsilon()))

    def nasStructdataPreprocess(self):
        df = self.data
        self.paramCheck()
        target = df[self.targetFeature].values
        counter = Counter(target)
        for k, v in counter.items():
            per = v / len(target) * 100
            self.log.info('autokeras struct Class=%d, Count=%d, Percentage=%.3f%%' % (k, v, per))
        # select columns with numerical data types
        num_ix = df.select_dtypes(include=['int64', 'float64']).columns
        subset = df[num_ix]
        last_ix = len(df.columns) - 1
        y = df[self.targetFeature]
        X = df.drop(self.targetFeature, axis=1)
        #Using Pearson Correlation
        # plt.figure(figsize=(12,10))
        # cor = df.corr()
        # sns.heatmap(cor, annot=True, cmap=plt.cm.Reds)
        # plt.show()
        # select categorical features
        cat_ix = X.select_dtypes(include=['object', 'bool']).columns
        # one hot encode cat features only
        ct = ColumnTransformer([('o', OneHotEncoder(), cat_ix)], remainder='passthrough')
        X = X.reset_index()
        X = X.replace(to_replace="NULL", value=0)
        X = X.dropna(how='any', axis=0)
        X = ct.fit_transform(X)
        from sklearn.preprocessing import scale
        X = scale(X)
        # label encode the target variable to have the classes 0 and 1
        y = LabelEncoder().fit_transform(y)
        # separate into train and test sets
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.test_size, random_state=1)
        return X_train, X_test, y_train, y_test

    def nasStructClassification(self, scoreParam):
        try:
            objClf = aion_matrix()
            X_train, X_test, y_train, y_test = self.xtrain, self.xtest, self.ytrain, self.ytest
            modelName = "nas_structdata_classifier"
            self.log.info("Processing structured data block...\n")
            s_in = ak.StructuredDataInput()
            #s_in = Flatten()(s_in)
            s_out = ak.StructuredDataBlock(categorical_encoding=True)(s_in)
            self.log.info("Data pipe via autokeras Classification Dense layers ...\n")
            s_out = ak.ClassificationHead()(s_out)
            self.log.info("applying autokeras automodel to run different neural models...\n")
            try:
                tuner = str(self.tuner).lower()
            except UnicodeEncodeError:
                tuner = (self.tuner.encode('utf8')).lower()
            nasclf = ak.AutoModel(
                inputs=s_in,
                outputs=s_out,
                overwrite=True,
                tuner=tuner,
                max_trials=self.n_models,
                seed=self.seed)
            # compile the model
            #nasclf.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc',self.f1_score,self.precision_m, self.recall_m])
            nasclf.fit(X_train, y_train, epochs=self.n_epochs)
            best_model = nasclf.export_model()
            mpredict = best_model.predict(X_test)
            mtpredict = best_model.predict(X_train)
            #loss, accuracy, f1_score, precision, recall = nasclf.evaluate(X_test, y_test, verbose=0)
            #from sklearn.metrics import classification_report
            #Classification report
            y_pred_bool = np.argmax(mpredict, axis=1)
            y_train_pred_bool = np.argmax(mtpredict, axis=1)
            score = objClf.get_score(scoreParam, y_test, y_pred_bool)
            #best_model = nasclf.export_model()
            best_model_summary = best_model.summary()
            filename = os.path.join(self.deployLocation, 'log', 'summary.txt')
            with open(filename, 'w') as f:
                best_model.summary(print_fn=lambda x: f.write(x + '\n'))
            #self.log.info("==========")
            #self.log.info(best_model_summary)
            self.log.info("NAS struct data classification, best model summary: \n"+str(best_model.summary(print_fn=self.log.info)))
            #self.log.info("==========")
            #Save and load model
            # try:
            #     best_model.save("model_class_autokeras", save_format="tf")
            # except Exception:
            #     best_model.save("model_class_autokeras.h5")
            # loaded_model = load_model("model_class_autokeras", custom_objects=ak.CUSTOM_OBJECTS)
            # loadedmodel_predict=loaded_model.predict(X_test)
            loss, accuracy_m = nasclf.evaluate(X_test, y_test)
            #mpredict_classes = mpredict.argmax(axis=-1)
            #accuracy = accuracy_score(y_test.astype(int), mpredict.astype(int))
            # precision tp / (tp + fp)
            #precision = precision_score(y_test.astype(int), mpredict.astype(int),average='macro')
            # recall: tp / (tp + fn)
            #recall = recall_score(y_test.astype(int), mpredict.astype(int),average='macro')
            #f1score=f1_score(y_test.astype(int), mpredict.astype(int) , average="macro")
            self.log.info("Autokeras struct data classification metrics: \n")
        except Exception as inst:
            self.log.info("Error: NAS failed "+str(inst))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
            print(inst)
        return modelName, nasclf, score

    def nasStructRegressor(self, scoreParam):
        objClf = aion_matrix()
        modelName = "nas_struct_regressor"
        #self.paramCheck()
        X_train, X_test, y_train, y_test = self.xtrain, self.xtest, self.ytrain, self.ytest
        # Autokeras alg
        s_in = ak.StructuredDataInput()
        #tf.keras.layers.GlobalMaxPooling2D()(s_in)
        s_out = ak.StructuredDataBlock(categorical_encoding=True)(s_in)
        self.log.info("Data pipe via autokeras Regression Dense layers ...\n")
        s_out = ak.RegressionHead(loss='mse', metrics=['mae'])(s_out)
        self.log.info("applying autokeras automodel to evaluate different neural models...\n")
        try:
            tuner = str(self.tuner).lower()
        except UnicodeEncodeError:
            tuner = (self.tuner.encode('utf8')).lower()
        nas_reg = ak.AutoModel(
            inputs=s_in,
            outputs=s_out,
            overwrite=True,
            tuner=tuner,
            max_trials=self.n_models)
        nas_reg.fit(X_train, y_train, epochs=self.n_epochs)
        best_model = nas_reg.export_model()
        self.log.info("NAS struct data regression best model summary: \n")
        best_model_summary = best_model.summary(print_fn=self.log.info)
        self.log.info(best_model_summary)
        predictm = best_model.predict(X_test)
        mtpredict = best_model.predict(X_train)
        score = objClf.get_score(scoreParam, y_test, predictm)
        self.log.info("Autokeras struct data regression metrics: \n")
        return modelName, nas_reg, score

    def nasMain(self, scoreParam):
        modelName = ""
        nasclf = None
        nas_reg = None
        #text_reg_model=None
        mse_value = 0
        reg_rmse = 0
        mape_reg = 0
        huber_loss_reg = 0
        accuracy = 0
        precision = 0
        recall = 0
        #Dummy values to return main for classification problems
        dummy_score_1 = int(0)
        #dummy_score_2=int(0)
        try:
            if ((self.nas_class.lower() == "classification")):
                modelName, nasclf, score = self.nasStructClassification(scoreParam)
                self.log.info('NAS Struct Classification score: '+str(score))
                best_model_nas = nasclf.export_model()
                scoredetails = '{"Model":"NAS","Score":'+str(round(score, 2))+'}'
                return best_model_nas, self.nas_params, round(score, 2), 'NAS', -1, -1, -1
            elif (self.nas_class.lower() == "regression"):
                modelName, nas_reg, score = self.nasStructRegressor(scoreParam)
                self.log.info('NAS Struct Regression score: '+str(score))
                best_model_nas = nas_reg.export_model()
                '''
                filename = os.path.join(self.deployLocation,'model','autoKerasModel')
                best_model_nas = nas_reg.export_model()
                try:
                    best_model_nas.save(filename, save_format="tf")
                    modelName = 'autoKerasModel'
                except Exception:
                    filename = os.path.join(self.deployLocation,'model','autoKerasModel.h5')
                    best_model_nas.save(filename)
                    modelName = 'autoKerasModel.h5'
                '''
                scoredetails = '{"Model":"NAS","Score":'+str(round(score, 2))+'}'
                '''
                error_matrix = '"MSE":"'+str(round(mse_value,2))+'","RMSE":"'+str(round(reg_rmse,2))+'","MAPE":"'+str(round(mape_reg,2))+'","MSLE":"'+str(round(msle_reg,2))+'"'
                '''
                return best_model_nas, self.nas_params, score, 'NAS'
            else:
                pass
        except Exception as inst:
            print(inst)
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
            output = {"status": "FAIL", "message": str(inst).strip('"')}
            output = json.dumps(output)
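For context, a rough usage sketch of the class above, not taken from the AION pipeline itself: the hyperparameter dictionary, the pre-split arrays, and the deploy folder are all hypothetical, and 'accuracy' is only an assumed value for scoreParam (the accepted values depend on aion_matrix.get_score, which is defined elsewhere).

# illustrative only; all values below are assumptions
nas_params = {
    'n_models': 3,          # AutoKeras max_trials
    'n_epochs': 10,
    'optimizer': 'adam',
    'metrics': 'accuracy',
    'tuner': 'greedy',
    'seed': 42,
}
nas = aionNAS('classification', nas_params,
              X_train, X_test, y_train, y_test,     # pre-split feature/target arrays
              deployLocation='/tmp/aion_deploy')    # hypothetical; must contain a 'log' folder
best_model, params, score, algo, *_ = nas.nasMain('accuracy')
print(algo, score)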