import os
import time
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
import pickle
import logging
class recommendersystem():
def __init__(self,features,svd_params):
self.features = features
self.svd_input = svd_params
self.log = logging.getLogger('eion')
print ("recommendersystem starts \\n")
#To extract dict key,values
def extract_params(self,dict):
self.dict=dict
for k,v in self.dict.items():
return k,v
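# Note (illustrative): the return inside the loop means only the first
# key/value pair is extracted, e.g. extract_params({'n_factors': 50,
# 'n_epochs': 20}) -> ('n_factors', 50). The keys here are hypothetical.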
def recommender_model(self,df,outputfile):
from sklearn.metrics.pairwise import cosine_similarity
from utils.file_ops import save_csv
USER_ITEM_MATRIX = 'user_item_matrix'
ITEM_SIMILARITY_MATRIX = 'item_similarity_matrix'
selectedColumns = self.features.split(',')
data = df[selectedColumns].copy()
dataset = data
self.log.info('-------> Top(5) Rows')
self.log.info(data.head(5))
start = time.time()
self.log.info('\\n----------- Recommender System Training Starts -----------')
#--------------- Task 11190:recommender system changes Start ---Usnish------------------#
# selectedColumns = ['userId', 'movieId', 'rating']
df_eda = df.groupby(selectedColumns[1]).agg(mean_rating=(selectedColumns[2], 'mean'),number_of_ratings=(selectedColumns[2], 'count')).reset_index()
self.log.info('-------> Top 10 most rated Items:')
self.log.info(df_eda.sort_values(by='number_of_ratings', ascending=False).head(10))
matrix = data.pivot_table(index=selectedColumns[1], columns=selectedColumns[0], values=selectedColumns[2])
relative_file = os.path.join(outputfile, 'data', USER_ITEM_MATRIX + '.csv')
matrix.to_csv(relative_file)
item_similarity_cosine = cosine_similarity(matrix.fillna(0))
item_similarity_cosine = pd.DataFrame(item_similarity_cosine,columns=pd.Series([i + 1 for i in range(item_similarity_cosine.shape[0])],name='ItemId'),index=pd.Series([i + 1 for i in range(item_similarity_cosine.shape[0])],name='ItemId'))
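# Illustrative sketch of the step above on a toy 3-item x 2-user ratings
# matrix (values are hypothetical; NaNs are zero-filled before scoring):
# ratings = pd.DataFrame([[5, 3], [4, 0], [1, 1]])
# cosine_similarity(ratings) -> a 3x3 matrix with 1.0 on the diagonal, where
# entry [i, j] is the cosine between item i's and item j's rating vectors.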
self.log.info('---------> Item-Item Similarity matrix created:')
self.log.info(item_similarity_cosine.head(5))
relative_file = os.path.join(outputfile, 'data', ITEM_SIMILARITY_MATRIX + '.csv')
save_csv(item_similarity_cosine,relative_file)
# --------------- recommender system changes End ---Usnish------------------#
executionTime=time.time() - start
self.log.info("------->Execution Time: "+str(executionTime))
self.log.info('----------- Recommender System Training End -----------\\n')
return "filename",matrix,"NA","",""<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import numpy as np
import pickle
import pandas as pd
import sys
import time
import os
from os.path import expanduser
import platform
from sklearn.preprocessing import binarize
import logging
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras import preprocessing
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Input, Embedding, LSTM, Lambda
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Concatenate
from tensorflow.keras.layers import Input, Dense, Flatten, GlobalMaxPool2D, GlobalAvgPool2D, Concatenate, Multiply, Dropout, Subtract, Add, Conv2D
from sklearn.metrics.pairwise import cosine_similarity, cosine_distances
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras import layers, utils, callbacks, optimizers, regularizers
## Keras subclassing based siamese network
class siameseNetwork(Model):
def __init__(self, activation,inputShape, num_iterations):
super(siameseNetwork, self).__init__() #must run before any attribute is set on a subclassed Model
#Accept a bare int shape (e.g. a sequence length) as a 1-tuple for keras Input
if isinstance(inputShape, int):
inputShape = (inputShape,)
self.activation=activation
self.log = logging.getLogger('eion')
i1 = layers.Input(shape=inputShape)
i2 = layers.Input(shape=inputShape)
featureExtractor = self.build_feature_extractor(inputShape, num_iterations)
f1 = featureExtractor(i1)
f2 = featureExtractor(i2)
#distance vect
distance = layers.Concatenate()([f1, f2])
cosine_loss = tf.keras.losses.CosineSimilarity(axis=1)
c_loss=cosine_loss(f1, f2)
similarity = tf.keras.layers.Dot(axes=1,normalize=True)([f1,f2])
outputs = layers.Dense(1, activation="sigmoid")(distance)
self.model = Model(inputs=[i1, i2], outputs=outputs)
##Build dense sequential layers
def build_feature_extractor(self, inputShape, num_iterations):
layers_config = [layers.Input(inputShape)]
for i, n_units in enumerate(num_iterations):
layers_config.append(layers.Dense(n_units))
layers_config.append(layers.Dropout(0.2))
layers_config.append(layers.BatchNormalization())
layers_config.append(layers.Activation(self.activation))
model = Sequential(layers_config, name='feature_extractor')
return model
def call(self, x):
return self.model(x)
def euclidean_distance(vectors):
(f1, f2) = vectors
sumSquared = K.sum(K.square(f1 - f2), axis=1, keepdims=True)
return K.sqrt(K.maximum(sumSquared, K.epsilon()))
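# Illustrative example with hypothetical 2-d feature vectors:
# euclidean_distance((K.constant([[0., 3.]]), K.constant([[4., 0.]]))) -> [[5.]]
# K.maximum(..., K.epsilon()) keeps the sqrt gradient finite at zero distance.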
def cosine_similarity(vectors):
(f1, f2) = vectors
f1 = K.l2_normalize(f1, axis=-1)
f2 = K.l2_normalize(f2, axis=-1)
return K.mean(f1 * f2, axis=-1, keepdims=True)
def cos_dist_output_shape(shapes):
shape1, shape2 = shapes
return (shape1[0],1)
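# Usage sketch (illustrative; shapes and hyperparameters are assumptions):
# net = siameseNetwork('relu', inputShape=100, num_iterations=[64, 32])
# net.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# net.fit([s1, s2], labels, epochs=5) # s1, s2: padded sequence pairs
# Both inputs pass through the same weight-shared feature extractor; the
# sigmoid head scores the concatenated embeddings as similar/dissimilar.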
class eion_similarity_siamese:
def __init__(self):
self.log = logging.getLogger('eion')
def siamese_model(self,df,col1,col2,targetColumn,conf,pipe,deployLocation,iterName,iterVersion,testPercentage,predicted_data_file):
try:
self.log.info('-------> Read Embedded File')
home = expanduser("~")
if platform.system() == 'Windows':
modelsPath = os.path.join(home,'AppData','Local','HCLT','AION','PreTrainedModels','TextSimilarity')
else:
modelsPath = os.path.join(home,'HCLT','AION','PreTrainedModels','TextSimilarity')
if os.path.isdir(modelsPath) == False:
os.makedirs(modelsPath)
embedding_file_path = os.path.join(modelsPath,'glove.6B.100d.txt')
if not os.path.exists(embedding_file_path):
from pathlib import Path
import urllib.request
import zipfile
location = modelsPath
local_file_path = os.path.join(location,"glove.6B.zip")
file_test, header_test = urllib.request.urlretrieve('http://nlp.stanford.edu/data/wordvecs/glove.6B.zip', local_file_path)
with zipfile.ZipFile(local_file_path, 'r') as zip_ref:
zip_ref.extractall(location)
os.unlink(os.path.join(location,"glove.6B.zip"))
if os.path.isfile(os.path.join(location,"glove.6B.50d.txt")):
os.unlink(os.path.join(location,"glove.6B.50d.txt"))
if os.path.isfile(os.path.join(location,"glove.6B.300d.txt")):
os.unlink(os.path.join(location,"glove.6B.300d.txt"))
if os.path.isfile(os.path.join(location,"glove.6B.200d.txt")):
os.unlink(os.path.join(location,"glove.6B.200d.txt"))
X = df[[col1,col2]]
Y = df[targetColumn]
self.log.info('\\n-------------- Test Train Split ----------------')
if testPercentage == 0:
xtrain=X
ytrain=Y
xtest=X
ytest=Y
else:
testSize=testPercentage/100
self.log.info('-------> Split Type: Random Split')
self.log.info('-------> Test Size (fraction): '+str(testSize))
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=testSize)
self.log.info('-------> Train Data Shape: '+str(X_train.shape)+' ---------->')
self.log.info('-------> Test Data Shape: '+str(X_test.shape)+' ---------->')
self.log.info('-------------- Test Train Split End ----------------\\n')
self.log.info('\\n-------------- Train Validate Split ----------------')
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.20, random_state=42)
self.log.info('-------> Train Data Shape: '+str(X_train.shape)+' ---------->')
self.log.info('-------> Validate Data Shape: '+str(X_val.shape)+' ---------->')
self.log.info('-------------- Train Validate Split End----------------\\n')
self.log.info('Status:- |... Train / test split done: '+str(100-testPercentage)+'% train,'+str(testPercentage)+'% test')
train_sentence1 = pipe.texts_to_sequences(X_train[col1].values)
train_sentence2 = pipe.texts_to_sequences(X_train[col2].values)
val_sentence1 = pipe.texts_to_sequences(X_val[col1].values)
val_sentence2 = pipe.texts_to_sequences(X_val[col2].values)
len_vec = [len(sent_vec) for sent_vec in train_sentence1]
max_len = np.max(len_vec)
len_vec = [len(sent_vec) for sent_vec in train_sentence2]
if (max_len < np.max(len_vec)):
max_len = np.max(len_vec)
train_sentence1 = pad_sequences(train_sentence1, maxlen=max_len, padding='post')
train_sentence2 = pad_sequences(train_sentence2, maxlen=max_len, padding='post')
val_sentence1 = pad_sequences(val_sentence1, maxlen=max_len, padding='post')
val_sentence2 = pad_sequences(val_sentence2, maxlen=max_len, padding='post')
y_train = y_train.values
y_val = y_val.values
activation = str(conf['activation'])
model = siameseNetwork(activation,inputShape=train_sentence1.shape[1], num_iterations=[10])
model.compile(
loss="binary_crossentropy",
optimizer=optimizers.Adam(learning_rate=0.0001),
metrics=["accuracy"])
es = callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=1, restore_best_weights=True)
rlp = callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.1, patience=2, min_lr=1e-10, mode='min', verbose=1
)
x_valid=X_val
y_valid=y_val
n_epoch = int(conf['num_epochs'])
batch_size = int(conf['batch_size'])
similarityIndex = conf['similarityIndex']
model.fit([train_sentence1,train_sentence2],y_train.reshape(-1,1), epochs = n_epoch,batch_size=batch_size,
validation_data=([val_sentence1, val_sentence2],y_val.reshape(-1,1)),callbacks=[es, rlp])
scores = model.evaluate([val_sentence1, val_sentence2], y_val.reshape(-1,1), verbose=0)
self.log.info('-------> Model Score Matrix: Accuracy')
self.log.info('-------> Model Score (Validate Data) : '+str(scores[1]))
self.log.info('Status:- |... Algorithm applied: SIAMESE')
test_sentence1 = pipe.texts_to_sequences(X_test[col1].values)
test_sentence2 = pipe.texts_to_sequences(X_test[col2].values)
test_sentence1 = pad_sequences(test_sentence1, maxlen=max_len, padding='post')
test_sentence2 = pad_sequences(test_sentence2, maxlen=max_len, padding='post')
prediction = model.predict([test_sentence1, test_sentence2 ])
self.log.info('-------> similarityIndex : '+str(similarityIndex))
prediction = np.where(prediction > similarityIndex,1,0)
rocauc_sco = roc_auc_score(y_test, prediction)
acc_sco = accuracy_score(y_test, prediction)
predict_df = pd.DataFrame()
predict_df['actual'] = y_test
predict_df['predict'] = prediction
predict_df.to_csv(predicted_data_file)
self.log.info("predict_df: \\n"+str(predict_df))
sco = acc_sco
self.log.info('-------> Test Data Accuracy Score : '+str(acc_sco))
self.log.info('Status:- |... Testing Score: '+str(acc_sco))
self.log.info('-------> Test Data ROC AUC Score : '+str(rocauc_sco))
matrix = '"Accuracy":'+str(acc_sco)+',"ROC AUC":'+str(rocauc_sco)
prediction = model.predict([train_sentence1, train_sentence2])
prediction = np.where(prediction > similarityIndex,1,0)
train_rocauc_sco = roc_auc_score(y_train,prediction)
train_acc_sco = accuracy_score(y_train, prediction)
self.log.info('-------> Train Data Accuracy Score : '+str(train_acc_sco))
self.log.info('-------> Train Data ROC AUC Score : '+str(train_rocauc_sco))
trainmatrix = '"Accuracy":'+str(train_acc_sco)+',"ROC AUC":'+str(train_rocauc_sco)
model_tried = '{"Model":"SIAMESE","Score":'+str(sco)+'}'
saved_model = 'textsimilarity_'+iterName+'_'+iterVersion
# filename = os.path.join(deployLocation,'model','textsimilarity_'+iterName+'_'+iterVersion+'.sav')
# filename = os.path.join(deployLocation,'model','textsimilarity_'+iterName+'_'+iterVersion+'.h5')
## Because the model uses the Keras subclassing API, save it as a SavedModel directory (as below) instead of a .h5 file.
filename = os.path.join(deployLocation,'model','textsimilarity_'+iterName+'_'+iterVersion)
model.save(filename)
# model.save_weights(filename)
model_name = 'SIAMESE MODEL'
return(model_name,scores[1],matrix,trainmatrix,model_tried,saved_model,filename,max_len,similarityIndex)
except Exception as inst:
self.log.info("SIAMESE failed " + str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
#Python sklearn & std libraries
import numpy as np
import pandas as pd
from time_series.ts_arima_eion import eion_arima
from statsmodels.tsa.vector_ar.vecm import coint_johansen
from statsmodels.tsa.vector_ar.var_model import VAR
from math import *
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from math import sqrt
import logging
import os
import sys
import time
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
from pandas import read_csv
from statsmodels.tsa.stattools import adfuller
import pmdarima as pm
from statsmodels.tsa.stattools import grangercausalitytests
from statsmodels.stats.stattools import durbin_watson
from sklearn.utils import check_array
class timeseriesModelTests():
def __init__(self,data,targetFeature,datetimeFeature,count):
#self.tsConfig = tsConfig
#self.modelconfig = modelconfig
#self.modelList = modelList
self.data = data
self.targetFeature = targetFeature
self.dateTimeFeature = datetimeFeature
self.count=count
self.log = logging.getLogger('eion')
def StatinaryChecks(self,dictDiffCount):
self.log.info("\\n---------------Start Stationary Checks-----------")
tFeature = self.targetFeature.split(',')
tFeature.append(self.dateTimeFeature)
self.data=self.data[tFeature]
tFeature.remove(self.dateTimeFeature)
lengthtFeature=len(tFeature)
diffCount=0
try :
for features in (tFeature):
XSt = self.data[features]
XSt=XSt.values
resultSt = adfuller(XSt,autolag='AIC')
stationaryFlag = False
#print(resultSt)
self.log.info('-------> Features: '+str(features))
self.log.info('----------> ADF Statistic: '+str(resultSt[0]))
self.log.info('----------> p-value: %f' % resultSt[1])
if resultSt[1]<= 0.05:
self.log.info("-------------> Converted As Stationary Data")
stationaryFlag = True
else:
self.log.info("-------------> Stationary Conversion Required")
stationaryFlag = False
self.log.info('----------> Critical Values')
for key, value in resultSt[4].items():
self.log.info('----------> '+str(key)+': '+str(value))
if stationaryFlag == False:
self.data[features]=self.data[features].diff()
self.data=self.data.dropna()
dictDiffCount[features]=1
XStt = self.data[features]
XStt=XStt.values
resultStt = adfuller(XStt)
if resultStt[1] > 0.05:
self.data[features]=self.data[features].diff()
self.data=self.data.dropna()
dictDiffCount[features]=2
XSttt = self.data[features]
XSttt=XSttt.values
resultSttt = adfuller(XSttt)
if resultSttt[1]<= 0.05:
stationaryFlag = True
else:
stationaryFlag = True
self.log.info("------------->"+str(dictDiffCount))
if stationaryFlag == True:
self.log.info("----------> Equals to Stationary Data")
else:
self.log.info("----------> Not Equal To Stationary Data")
self.log.info("-------> Stationary data diff()")
self.log.info(dictDiffCount)
self.log.info("---------------Start Stationary Checks Ends-----------\\n")
return self.data,dictDiffCount
except Exception as inst:
self.log.info('<!------------- Time Series Stationary Error ---------------> ')
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def varTimeseriesModelTests(self,data):
try :
tFeature = self.targetFeature.split(',')
self.log.info("\\n--------- Start Granger Causality Test Results ------------")
gtest=grangercausalitytests(data[tFeature], maxlag=15, addconst=True, verbose=True)
self.log.info("-------> GrangerCausalitytest Results "+str(gtest.values()))
self.log.info("--------- End Granger Causality Test Results ------------\\n")
return gtest
except Exception as inst:
self.log.info('<!------------- Time Series Granger Causality Test Error ---------------> ')
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def grangersCausationMatrix(self,data, variables, test='ssr_chi2test', verbose=False):
try :
countVariables=0
self.log.info(len(variables))
self.log.info("\\n--------------Start GrangersCausationMatrix---------------")
df = pd.DataFrame(np.zeros((len(variables), len(variables))), columns=variables, index=variables)
for c in df.columns:
for r in df.index:
test_result = grangercausalitytests(data[[r, c]], maxlag=12, verbose=False)
p_values = [round(test_result[i+1][0][test][1],4) for i in range(12)]
if verbose: print(f'Y = {r}, X = {c}, P Values = {p_values}')
min_p_value = np.min(p_values)
df.loc[r, c] = min_p_value
df.columns = [var + '_x' for var in variables]
df.index = [var + '_y' for var in variables]
self.log.info(df)
for i in range(len(variables)):
for j in range(len(variables)):
if i!=j and df.iloc[i][j]<0.05:
countVariables=countVariables+1
self.log.info("--------------End GrangersCausationMatrix---------------\\n")
return df,countVariables
except Exception as inst:
self.log.info('<!------------- Time Series grangersCausationMatrix Test Error ---------------> ')
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
return df,countVariables
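# Illustrative reading of the matrix (hypothetical 2-variable case): a p-value
# at row 'y1_y', column 'y2_x' below 0.05 suggests past values of y2 help
# predict y1 (y2 Granger-causes y1); countVariables counts the off-diagonal
# cells that pass this 0.05 test.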
def coIntegrationTest(self,data):
try :
tdata = data.drop([self.dateTimeFeature], axis=1)
tdata.index = data[self.dateTimeFeature]
cols = tdata.columns
self.log.info("\\n-------------- Start of the Co Integration test ---------------")
lenTargetFeature=len(self.targetFeature)
countIntegrationFeature=0
N, l = tdata.shape
jres = coint_johansen(tdata, 0, 1)
trstat = jres.lr1
tsignf = jres.cvt
for i in range(l):
if trstat[i] > tsignf[i, 1]:
r = i + 1
jres.r = r
jres.evecr = jres.evec[:, :r]
jres.r = r
countIntegrationFeature=jres.r
jres.evecr = jres.evec[:, :r]
self.log.info('------->coint_johansen trace statistics: '+str(trstat))
self.log.info('------->coint_johansen critical values:')
self.log.info(tsignf)
self.log.info("------->There are "+str(countIntegrationFeature)+" Co-Integration vectors")
self.log.info("-------------- End of the Co Integration test ---------------\\n")
return countIntegrationFeature
except Exception as inst:
self.log.info('<!------------- Time Series Co-Integration Test Error ---------------> ')
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
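# Illustrative reading of the Johansen output above: the number of trace
# statistics (jres.lr1) exceeding their 95% critical values (jres.cvt[:, 1])
# is the co-integration rank, i.e. how many independent long-run equilibrium
# relations the target series share.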
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
# For timeseries pmdarima (pyramid-arima) module
import json
#Python sklearn & std libraries
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import VarianceThreshold
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
#from sklearn.metrics import mean_absolute_percentage_error
from sklearn.linear_model import LinearRegression
from math import sqrt
import warnings
# For serialization.
#from sklearn.externals import joblib
import pickle
import os,sys
# For plotting (matplotlib)
import matplotlib.pyplot as plt
import plotly
#Import eion config manager module
import logging
from sklearn import metrics
from sklearn.metrics import accuracy_score
import time
import random
import statsmodels.api as sm
# prophet by Facebook
# time series analysis
#from statsmodels.tsa.seasonal import seasonal_decompose
#from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from prophet.plot import plot_plotly,plot_components_plotly
#import seaborn as sns
from sklearn.model_selection import ParameterGrid
import holidays
#from prophet.diagnostics import performance_metrics
#from prophet.diagnostics import cross_validation
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
import logging,sys
from scipy.special import inv_boxcox
from prophet.diagnostics import cross_validation
#from sklearn.metrics import mean_absolute_percentage_error
warnings.filterwarnings("ignore")
# Aion Prophet module
class aion_fbprophet ():
#Constructor
def __init__(self,configfile,testpercentage,data,targetFeature,dateTimeFeature):
try:
self.tsprophet_params = configfile
self.data=data
self.targetFeature=targetFeature
self.dateTimeFeature=dateTimeFeature
self.testpercentage = testpercentage
self.log = logging.getLogger('eion')
except Exception as inst:
self.log.info('<!------------- Prophet INIT Error ---------------> ')
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
#Find datetime column
def get_datetime_col(self,data):
df=data
dt_col=[]
categorical_features=[]
discrete_features=[]
# Check each column's type (object, float or int). Object columns are passed to
# pd.to_datetime(): if the conversion succeeds the column is datetime, otherwise it is treated as categorical. Numeric columns are collected as discrete features.
for col in df.columns:
if (df[col].dtype == 'object' or df[col].dtype == 'datetime64[ns]' ):
try:
df[col] = pd.to_datetime(df[col])
dt_col.append(col)
except ValueError:
categorical_features.append(col)
pass
elif df[col].dtype in ('float64', 'int64', 'int', 'float'): #numeric (discrete) columns
#print("discrete features found..\\n")
discrete_features.append(col)
else:
pass
#Uncomment to know the datetime, categorical and continuous cols
# print ("Date time colms: dt_col: \\n",dt_col)
# print("categorical features: \\n",categorical_features)
# print("continuous features: \\n",discrete_features)
return dt_col
def get_predict_frequency(self,df,datetime_col_name):
#dt_col=pd.to_datetime(df[datetime_col_name], format='%m/%d/%Y %H:%M:%S')
dt_col=pd.to_datetime(df[datetime_col_name])
#df['tvalue'] = df[datetime_col_name]
df['time_diff'] = (df[datetime_col_name]-df[datetime_col_name].shift()).fillna(pd.Timedelta('0'))
mean_diff_dt=df['time_diff'].mean()
time_diff_secs=mean_diff_dt.total_seconds()
time_sec_2_hr=((time_diff_secs/60)/60)
pred_freq=""
time_sec_2_hr=round(time_sec_2_hr)
#For abbreviation ,refer https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases
if (time_sec_2_hr < 1):
pred_freq="min"
else:
if (time_sec_2_hr >= 24):
if (time_sec_2_hr > 168):
if(time_sec_2_hr >= 696): # at least ~29 days (monthly or longer)
if(time_sec_2_hr > 8760):
pred_freq="Y"
else:
pred_freq="M"
else:
pred_freq="W"
else:
pred_freq="D"
else:
pred_freq="H"
return pred_freq
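# Illustrative mapping of the mean timestamp gap to a frequency alias
# (toy values; thresholds follow the branches above):
# ~0.5h -> 'min', ~1h-23h -> 'H', ~24h -> 'D', ~170h -> 'W',
# ~720h (about a month) -> 'M', ~9000h (over a year) -> 'Y'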
#To extract dict key,values
def extract_params(self,dict):
self.dict=dict
for k,v in self.dict.items():
return k,v
def mean_absolute_percentage_error(self,y_true, y_pred):
if (y_true.isin([0]).sum() > 0):
y_true=y_true.mask(y_true==0).fillna(y_true.mean())
try:
y_true, y_pred=np.array(y_true), np.array(y_pred)
#return np.mean(np.abs((y_true - y_pred) / y_true+sys.float_info.epsilon)) * 100
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
except Exception as inst:
self.log.info('<------------- mean_absolute_percentage_error ---------------> ')
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
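# Illustrative example on toy series (zeros in y_true are replaced by the
# series mean before the ratio is taken):
# y_true = pd.Series([100., 200.]); y_pred = np.array([110., 180.])
# -> mean(|100-110|/100, |200-180|/200) * 100 = 10.0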
def regressor_list(self,regressorstr):
lst = regressorstr.split (",")
reg_list=[]
for i in lst:
reg_list.append(i)
#print(reg_list)
return reg_list
# def get_regressors(self,reg):
# print("get extra inputs for prophet...\\n")
def aion_probhet(self,train_data,datetime_col_name,predicted_data_file,dataFolderLocation):
from prophet import Prophet
#Getting prophet params
#key,val = self.extract_params(self.tsprophet_params)
val=self.tsprophet_params
self.log.info('-------> The given prophet algorithm parameters:>>')
self.log.info(" "+str(val))
changepoint_prior_scale=[]
changepoint_range=[]
mcmc_samples=[]
interval_width=[]
holidays_prior_scale=[]
n_changepoints=[]
uncertainty_samples=[]
seasonality_prior_scale=[]
seasonality_mode=""
yearly_seasonality=None
weekly_seasonality=None
daily_seasonality=None
additional_regressors=""
holiday_country_name=""
holiday_years=[]
no_of_periods=0
pred_frequncy=""
for k,v in val.items():
try:
if (k == "seasonality_mode"):
seasonality_mode=v
elif (k == "changepoint_prior_scale"):
changepoint_prior_scale=[float(i) for i in v.split(',')]
elif (k == "changepoint_range"):
changepoint_range=[float(i) for i in v.split(',')]
elif (k == "yearly_seasonality"):
if v.lower() == 'true':
yearly_seasonality=True
elif v.lower() == 'false':
yearly_seasonality=False
elif v.lower() == 'auto':
yearly_seasonality=v
else:
yearly_seasonality=True
elif (k == "weekly_seasonality"):
if v.lower() == 'true':
weekly_seasonality=True
elif v.lower() == 'false':
weekly_seasonality=False
elif v.lower() == 'auto':
weekly_seasonality=v
else:
weekly_seasonality=False
#weekly_seasonality=v
elif (k == "daily_seasonality"):
if v.lower() == 'true':
daily_seasonality=True
elif v.lower() == 'false':
daily_seasonality=False
elif v.lower() == 'auto':
daily_seasonality=v
else:
daily_seasonality=False
elif (k == "mcmc_samples"):
mcmc_samples=[float(i) for i in v.split(',')]
elif (k == "interval_width"):
interval_width=[float(i) for i in v.split(',')]
elif (k == "holidays_prior_scale"):
#holidays_prior_scale=float(v)
holidays_prior_scale=[float(i) for i in v.split(',')]
elif (k == "n_changepoints"):
n_changepoints=[int(i) for i in v.split(',')]
elif (k == "uncertainty_samples"):
uncertainty_samples=[float(i) for i in v.split(',')]
elif (k == "seasonality_prior_scale"):
seasonality_prior_scale=[float(i) for i in v.split(',')]
elif (k == "additional_regressors"):
additional_regressors=str(v)
elif (k == "holiday_country_name"):
holiday_country_name=v
elif (k == "holiday_years"):
holiday_years=[int(i) for i in v.split(',')]
elif (k == "no_of_periods"):
no_of_periods=int(v)
elif (k == "pred_frequncy"):
pred_frequncy=v
else:
self.log.info("Invalid string.")
except Exception:
continue
try:
start = time.time()
datetime_col_name=str(datetime_col_name)
target_col=str(self.targetFeature)
#extra_regressors=additional_regressors
reg_list=self.regressor_list(additional_regressors)
get_dtcol=""
get_dtcol=self.get_datetime_col(self.data)[0]
#get predict frequency for user data
pred_freq= str(self.get_predict_frequency(self.data,datetime_col_name))
if (pred_frequncy):
pred_frequncy=pred_frequncy
else:
#If user not defined predict_freq in aion config or GUI, our algorithm will find automatically by get_predict_frequency() method
pred_frequncy=pred_freq
self.log.info("Auto Predict frequency period (Hour-H/Day-D/Week-W/Month-M/Year-Y): \\n"+str(pred_frequncy))
#For proper datetime format check.
self.data[self.dateTimeFeature] = pd.to_datetime(self.data[self.dateTimeFeature])
filterd_df = self.data.filter([get_dtcol,target_col])
holiday = pd.DataFrame([])
holiday_specified=holidays.CountryHoliday(holiday_country_name,years=holiday_years)
for date, name in sorted(holiday_specified.items()):
holiday = holiday.append(pd.DataFrame({'ds': date, 'holiday': "Holidays"}, index=[0]), ignore_index=True)
holiday['ds'] = pd.to_datetime(holiday['ds'], format='%Y-%m-%d %H:%M:%S', errors='ignore')
filterd_df=filterd_df.rename(columns={self.dateTimeFeature:'ds',target_col:'y'})
#Set seasonality model
try:
if not seasonality_mode:
self.log.info('Empty input for the seasonality_mode parameter in the aion configuration file. Please check. Setting default mode: additive. \\n')
seasonality_mode=[]
seasonality_mode=['additive']
multiplicative_s="multiplicative"
additive_s="additive"
else:
seasonality_mode = seasonality_mode.split(',')
len_seasonality_mode=len(seasonality_mode)
except ValueError as e:
self.log.info(e)
params_grid = {'seasonality_mode':(seasonality_mode),
'changepoint_prior_scale':changepoint_prior_scale,
'changepoint_range': changepoint_range,
'yearly_seasonality': [yearly_seasonality],
'weekly_seasonality': [weekly_seasonality],
'daily_seasonality': [daily_seasonality],
'mcmc_samples': mcmc_samples,
'interval_width': interval_width,
'holidays_prior_scale':holidays_prior_scale,
'n_changepoints' : n_changepoints,
'uncertainty_samples': uncertainty_samples,
'seasonality_prior_scale': seasonality_prior_scale}
grid = ParameterGrid(params_grid)
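# ParameterGrid expands the lists above into their Cartesian product, so the
# candidate count is the product of the list lengths, e.g. (illustrative)
# 2 seasonality modes x 3 changepoint_prior_scale values x 2 interval widths
# -> 12 parameter combinations, each fitted and scored below.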
p_cnt = len(grid) # ParameterGrid supports len() directly
self.log.info("--------------- Total Possible prophet iterations: --------------- \\n")
self.log.info(p_cnt)
self.log.info("\\n--------------- Modal Validation Start ---------------")
size = int(len(filterd_df) * (100 - self.testpercentage)/100)
train = filterd_df.loc[0:size]
valid = filterd_df.loc[size:len(filterd_df)]
self.log.info("------->Train Data Shape: "+str(train.shape))
self.log.info("------->Valid Data Shape"+str(valid.shape))
X_train = train
X_test = valid
len_test=len(X_test)
#For add_regressor,copy the add_regressor columns to use.
if (additional_regressors):
df1=pd.DataFrame()
df1[additional_regressors]=self.data[additional_regressors]
model_parameters_mape = pd.DataFrame(columns = ['MAPE','Parameters'])
model_parameters_rmse = pd.DataFrame(columns = ['rmse','Parameters'])
model_parameters_mse = pd.DataFrame(columns = ['mse','Parameters'])
model_parameters_mae = pd.DataFrame(columns = ['MAE','Parameters'])
model_parameters_r2 = pd.DataFrame(columns = ['r2','Parameters'])
for P in grid:
pred_forecast = pd.DataFrame()
random.seed(0)
train_model =Prophet(changepoint_prior_scale = P['changepoint_prior_scale'],
seasonality_mode=P['seasonality_mode'],
changepoint_range=P['changepoint_range'],
holidays_prior_scale = P['holidays_prior_scale'],
n_changepoints = P['n_changepoints'],
mcmc_samples=P['mcmc_samples'],
interval_width=P['interval_width'],
uncertainty_samples=P['uncertainty_samples'],
seasonality_prior_scale= P['seasonality_prior_scale'],
holidays=holiday,
weekly_seasonality=P['weekly_seasonality'],
daily_seasonality = P['daily_seasonality'],
yearly_seasonality = P['yearly_seasonality']
)
train_forecast=pd.DataFrame()
try:
train_model.fit(X_train)
train_forecast = train_model.make_future_dataframe(periods=len_test, freq=pred_frequncy,include_history = False)
train_forecast = train_model.predict(train_forecast)
except ValueError as e:
self.log.info(e)
self.log.info ("------->Check mcmc_samples value in aion confiuration, either 0 (default) or defined value,e.g.mcmc_samples:'300' to be set.If no idea on value, set to default.\\n")
pred_forecast=train_forecast[['ds','yhat']]
Actual=X_test
len_act=len(Actual['y'])
len_pred=len(pred_forecast['yhat'])
MAPE = self.mean_absolute_percentage_error(Actual['y'],abs(pred_forecast['yhat']))
#MAE
MAE = mean_absolute_error(Actual['y'],abs(pred_forecast['yhat']))
rmse = sqrt(mean_squared_error(Actual['y'],abs(pred_forecast['yhat'])))
mse = mean_squared_error(Actual['y'],abs(pred_forecast['yhat']))
r2 = r2_score(Actual['y'],abs(pred_forecast['yhat']))
# self.log.info ("------->Prophet RMSE :"+str(rmse))
# self.log.info ("------->Prophet MSE :"+str(mse))
# self.log.info ("------->Prophet MAE :"+str(MAE))
# self.log.info ("------->Prophet R2 :"+str(r2))
#record scores against the current grid combination P (not the leftover counter variable p)
model_parameters_mape = model_parameters_mape.append({'MAPE':MAPE,'Parameters':P},ignore_index=True)
model_parameters_rmse = model_parameters_rmse.append({'rmse':rmse,'Parameters':P},ignore_index=True)
model_parameters_mse = model_parameters_mse.append({'mse':mse,'Parameters':P},ignore_index=True)
model_parameters_mae = model_parameters_mae.append({'MAE':MAE,'Parameters':P},ignore_index=True)
model_parameters_r2 = model_parameters_r2.append({'r2':r2,'Parameters':P},ignore_index=True)
#end of for loop
parameters_mape = model_parameters_mape.sort_values(by=['MAPE'])
parameters_mape = parameters_mape.reset_index(drop=True)
best_params_mape=parameters_mape['Parameters'][0]
# print("Best Parameters on which the model has the least MAPE is: \\n",best_params_mape)
best_mape_score=parameters_mape['MAPE'].iloc[0]
#self.log.info('------->Mean absolute percent error log: \\n ')
#self.log.info('------->best_mape_score: \\n '+str(best_mape_score))
parameters_rmse = model_parameters_rmse.sort_values(by=['rmse'])
parameters_rmse = parameters_rmse.reset_index(drop=True)
best_params_rmse=parameters_rmse['Parameters'][0]
best_rmse_score=parameters_rmse['rmse'].iloc[0]
#self.log.info('------->Root Mean Squared Error log (Prophet timeseries): \\n ')
#self.log.info('------->best_rmse_score ((Prophet timeseries)): \\n '+str(best_rmse_score))
#mse
parameters_mse = model_parameters_mse.sort_values(by=['mse'])
parameters_mse = parameters_mse.reset_index(drop=True)
best_params_mse = parameters_mse['Parameters'][0]
best_mse_score=parameters_mse['mse'].iloc[0]
#MAE
parameters_mae = model_parameters_mae.sort_values(by=['MAE'])
parameters_mae = parameters_mae.reset_index(drop=True)
best_params_mae = parameters_mae['Parameters'][0]
best_mae_score=parameters_mae['MAE'].iloc[0]
# R2 score
parameters_r2 = model_parameters_r2.sort_values(by=['r2'], ascending=False) # higher R2 is better
parameters_r2 = parameters_r2.reset_index(drop=True)
best_params_r2 = parameters_r2['Parameters'][0]
best_r2_score=parameters_r2['r2'].iloc[0]
#Final best prophet mse,rmse,mape scores
# self.log.info ("------->Prophet RMSE :"+str(best_rmse_score))
# self.log.info ("------->Prophet MSE :"+str(best_mse_score))
# self.log.info ("------->Prophet MAE :"+str(best_mae_score))
# self.log.info ("------->Prophet R2 :"+str(best_r2_score))
#Extracting best model parameters
for k,v in best_params_mape.items():
try:
if (k == "changepoint_prior_scale"):
changepoint_prior_scale=float(v)
elif (k == "changepoint_range"):
changepoint_range=float(v)
elif (k == "daily_seasonality"):
daily_seasonality=v
elif (k == "holidays_prior_scale"):
holidays_prior_scale=float(v)
elif (k == "interval_width"):
interval_width=float(v)
elif (k == "mcmc_samples"):
mcmc_samples=float(v)
elif (k == "n_changepoints"):
n_changepoints=int(v)
elif (k == "seasonality_mode"):
seasonality_mode=str(v)
elif (k == "seasonality_prior_scale"):
seasonality_prior_scale=float(v) #prior scales are floats (parsed with float() above)
elif (k == "uncertainty_samples"):
uncertainty_samples=float(v)
elif (k == "weekly_seasonality"):
weekly_seasonality=v
elif (k == "yearly_seasonality"):
yearly_seasonality=v
else:
pass
except Exception as e:
self.log.info("\\n prophet time series config param parsing error"+str(e))
#continue
self.log.info("\\n Best prophet model accuracy parameters.\\n ")
#Prophet model based on mape best params.
best_prophet_model = Prophet(holidays=holiday,
changepoint_prior_scale= changepoint_prior_scale,
holidays_prior_scale = holidays_prior_scale,
n_changepoints = n_changepoints,
seasonality_mode = seasonality_mode,
weekly_seasonality= weekly_seasonality,
daily_seasonality = daily_seasonality,
yearly_seasonality = yearly_seasonality,
interval_width=interval_width,
mcmc_samples=mcmc_samples,
changepoint_range=changepoint_range)
# If holiday not set using prophet model,we can add as below.
# best_prophet_model.add_country_holidays(country_name=holiday_country_name)
#prophet add_regressor ,adding additional influencer (regressor) features, but it different from multivariant model.
if (additional_regressors):
filterd_df[additional_regressors] = df1[additional_regressors]
filterd_df.reset_index(drop=True)
for v in reg_list:
best_prophet_model=best_prophet_model.add_regressor(v)
#best_prophet_model.fit(X_train)
else:
pass
#Model prophet fit, it should be done before make_future_dataframe
best_prophet_model.fit(filterd_df)
future = best_prophet_model.make_future_dataframe(periods=no_of_periods, freq=pred_frequncy,include_history = False)
if (additional_regressors):
future[additional_regressors] = filterd_df[additional_regressors]
future.reset_index(drop=True)
future=future.dropna()
else:
pass
#Final prediction
forecast = best_prophet_model.predict(future)
# forecast_df=forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']]
# #Save forecast as csv file
# forecast_df.to_csv(r"prophet_realtime_user_steps.csv",index = False, header=True)
#Plot the predition and save in file
forecast_plot = best_prophet_model.plot(forecast)
imagefilename = os.path.join(dataFolderLocation,'log','img','prophet_fig.png')
forecast_plot.savefig(imagefilename)
#The below part is used to compare prophet predicted with actual value
#For train data
#Prophet model with train and test data, based on mape best params.
best_prophet_model_new = Prophet(holidays=holiday,
changepoint_prior_scale= changepoint_prior_scale,
holidays_prior_scale = holidays_prior_scale,
n_changepoints = n_changepoints,
seasonality_mode = seasonality_mode,
weekly_seasonality= weekly_seasonality,
daily_seasonality = daily_seasonality,
yearly_seasonality = yearly_seasonality,
interval_width=interval_width,
mcmc_samples=mcmc_samples,
changepoint_range=changepoint_range)
fp_forecast=pd.DataFrame()
try:
best_prophet_model_new.fit(X_train)
fp_forecast = best_prophet_model_new.make_future_dataframe(periods=len_test, freq=pred_frequncy,include_history = False)
fp_forecast = best_prophet_model_new.predict(fp_forecast)
except ValueError as e:
self.log.info(e)
self.log.info ("------->Check mcmc_samples value in aion confiuration, either 0 (default) or defined value,e.g.mcmc_samples:'300' to be set.If no idea on value, set to default.\\n")
pred_forecast=fp_forecast[['ds','yhat']]
pred_forecast['ds']=Actual['ds'].to_numpy()
Actual.ds = pd.to_datetime(Actual.ds)
pred_forecast.ds = pd.to_datetime(pred_forecast.ds)
MAE = mean_absolute_error(Actual['y'],abs(pred_forecast['yhat']))
rmse = sqrt(mean_squared_error(Actual['y'],abs(pred_forecast['yhat'])))
mse = mean_squared_error(Actual['y'],abs(pred_forecast['yhat']))
r2 = r2_score(Actual['y'],abs(pred_forecast['yhat']))
MAPE = self.mean_absolute_percentage_error(Actual['y'],abs(pred_forecast['yhat']))
#Final best prophet mse,rmse,mape scores
self.log.info ("------->Prophet RMSE : "+str(rmse))
self.log.info ("------->Prophet MSE : "+str(mse))
self.log.info ("------->Prophet MAE : "+str(MAE))
self.log.info ("------->Prophet R2 : "+str(r2))
self.log.info("------->Prophet MAPE: "+str(MAPE))
#self.log.info(MAPE)
#self.log.info('------->best_mape_score: \\n '+str(best_mape_score))
prophet_df = pd.merge(Actual,pred_forecast, on=['ds'], how='left')
cols = ['ds','y','yhat']
prophet_df_new = prophet_df[cols]
prophet_df_new.dropna(inplace=True)
actualfeature = target_col+'_actual'
predictfeature = target_col+'_pred'
prophet_df_new=prophet_df_new.rename(columns={'ds': 'datetime', 'y': actualfeature,'yhat': predictfeature})
#prophet_df_new.to_csv(predicted_data_file)
#cv_results = cross_validation( model = best_prophet_model, initial = pd.to_timedelta(no_of_periods,unit=pred_frequncy), horizon = pd.to_timedelta(no_of_periods,unit=pred_frequncy))
#forecast_df=forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']]
#Save forecast as csv file
#forecast_df.to_csv(r"prophet_realtime_Output.csv",index = False, header=True)
# self.log.info('------->Prophet time series forecast (last 7 prediction for user view): \\n ')
# self.log.info(forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail(7))
plot_prd=plot_plotly(best_prophet_model, forecast)
imagefilename = os.path.join(dataFolderLocation,'log','img','1_ppm_plot')
plotly.offline.plot(plot_prd, filename=imagefilename,auto_open = False)
plot_prd_components=plot_components_plotly(best_prophet_model, forecast)
imagefilename = os.path.join(dataFolderLocation,'log','img','2_ppm_plot')
plotly.offline.plot(plot_prd_components, filename=imagefilename,auto_open = False)
executionTime=(time.time() - start)
self.log.info('-------> Time: '+str(executionTime))
return best_prophet_model,best_mae_score,best_rmse_score,best_mse_score,best_mape_score,best_r2_score,pred_frequncy,additional_regressors,prophet_df_new
except Exception as inst:
#print("********** aion_fbprophet exception ************* \\n")
self.log.info('<!------------- Prophet Execute Error ---------------> '+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
# For timeseries pmdarima (pyramid-arima) module
from pmdarima.arima import auto_arima
import pmdarima as pm
import json
#Python sklearn & std libraries
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import VarianceThreshold
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
#from sklearn.metrics import mean_absolute_percentage_error
from sklearn.linear_model import LinearRegression
from math import sqrt
import warnings
# For serialization.
#from sklearn.externals import joblib
import pickle
import os,sys
# For plotting (matplotlib)
import matplotlib.pyplot as plt
#Import eion config manager module
import logging
from sklearn import metrics
from sklearn.metrics import accuracy_score
import time
import os
import sys
# Eion arima module
class eion_arima ():
#Constructor
def __init__(self,configfile,testpercentage,sesonalityChecks,stationaryChecks): # eaobj - eion arima class object
try:
tsarima_params = configfile
self.testpercentage = testpercentage
self.start_p= int(tsarima_params['start_p'])
self.start_q= int(tsarima_params['start_q'])
self.max_p= int(tsarima_params['max_p'])
self.max_q= int(tsarima_params['max_q'])
self.max_d= int(tsarima_params['max_d'])
self.max_order= int(tsarima_params['max_order'])
self.start_Q= int(tsarima_params['start_Q'])
self.max_P= int(tsarima_params['max_P'])
self.max_D= int(tsarima_params['max_D'])
self.max_Q= int(tsarima_params['max_Q'])
self.m= int(tsarima_params['m'])
self.start_P= int(tsarima_params['start_P'])
self.seasonal= tsarima_params['seasonal']
#self.seasonal= sesonalityChecks
self.stationary=stationaryChecks
#print("self.seasonal: \\n",self.seasonal)
#print("self.stationary: \\n",self.stationary)
if self.seasonal and not self.seasonal.isspace():
if (self.seasonal.lower() == 'true'):
self.seasonal=True
elif (self.seasonal.lower() == 'false'):
self.seasonal=False
else:
self.seasonal=True
else:
self.seasonal=True
self.d= int(tsarima_params['d'])
self.D= int(tsarima_params['D'])
#self.trace= tsarima_params['trace']
self.error_action= tsarima_params['error_action']
self.suppress_warnings= tsarima_params['suppress_warnings']
self.stepwise= tsarima_params['stepwise']
#self.random= tsarima_params['random']
self.log = logging.getLogger('eion')
except Exception as inst:
self.log.info('<!------------- Arima INIT Error ---------------> ')
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def mean_absolute_percentage_error(self,y_true, y_pred):
try:
y_true, y_pred=np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / (y_true + sys.float_info.epsilon))) * 100 #epsilon inside the denominator guards against division by zero
except Exception as inst:
self.log.info('<------------- mean_absolute_percentage_error ---------------> ')
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def eion_arima(self,train_data):
try:
start = time.time()
auto_arima_stepwise_fit = pm.auto_arima(train_data, start_p=self.start_p, start_q=self.start_q,max_p=self.max_p, max_q=self.max_q,max_d=self.max_d,max_P=self.max_P,max_D=self.max_D,max_Q=self.max_Q,max_order=self.max_order, m=self.m,start_P=self.start_P,start_Q=self.start_Q, seasonal=self.seasonal,stationary=self.stationary,d=self.d, D=self.D,error_action=self.error_action,suppress_warnings=self.suppress_warnings,stepwise=self.stepwise)
#auto_arima_stepwise_fit = pm.auto_arima(train_data, start_p=self.start_p, start_q=self.start_q,max_p=self.max_p, max_q=self.max_q,max_d=self.max_d,max_P=self.max_P,max_D=self.max_D,max_Q=self.max_Q,max_order=self.max_order, m=self.m,start_P=self.start_P,start_Q=self.start_Q, seasonal=True,stationary=True,d=self.d, D=self.D,error_action=self.error_action,suppress_warnings=self.suppress_warnings,random_state=20,stepwise=True)
aic_score = auto_arima_stepwise_fit.aic()
self.log.info('------->AIC Score: '+str(aic_score))
self.log.info('\\n--------- Fit Summary --------------')
self.log.info (auto_arima_stepwise_fit.summary())
self.log.info('--------- Fit Summary End--------------\\n')
self.log.info("\\n--------------- Modal Validation Start ---------------")
size = int(len(train_data) * (100 - self.testpercentage)/100)
train = train_data.loc[0:size]
valid = train_data.loc[size:len(train_data)]
# valid_perc=((100-self.testpercentage)/100)
# valid_perc=round(valid_perc, 1)
# print("valid_perc: \\n", valid_perc)
self.log.info("------->Train Data Shape: "+str(train.shape))
self.log.info("------->Valid Data Shape"+str(valid.shape))
start1=len(train)
end1=len(train_data)
modelfit = auto_arima_stepwise_fit.fit(train)
a_prediction = auto_arima_stepwise_fit.predict(valid.shape[0])
#a_prediction = auto_arima_stepwise_fit.predict(n_periods=len(valid))
#a_prediction = auto_arima_stepwise_fit.predict(start=start1,end=end1)
#print("a_prediction: \\n",a_prediction)
#self.log.info(a_prediction)
mae = metrics.mean_absolute_error(valid, a_prediction)
self.log.info ("------->MAE: "+str(mae))
mape = self.mean_absolute_percentage_error(valid, a_prediction)
#mape=np.mean(np.abs((valid - a_prediction) / valid)) * 100
self.log.info ("------->MAPE :"+str(mape))
#RMSE
rmse = sqrt(mean_squared_error(valid,a_prediction))
mse = mean_squared_error(valid,a_prediction)
self.log.info ("------->RMSE :"+str(rmse))
self.log.info ("------->MSE :"+str(mse))
from sklearn.metrics import r2_score
r2 = r2_score(valid,a_prediction)
########### End ####################
# now we have the model
auto_arima_stepwise_fit.fit(train_data)
self.log.info("------------- Validate Model End----------------\\n")
executionTime=time.time() - start
self.log.info('-------> Time: '+str(executionTime)+'\\n')
return auto_arima_stepwise_fit,mae,rmse,mse,r2,aic_score,mape,valid,a_prediction
except Exception as inst:
self.log.info('<!------------- Arima Execute Error ---------------> '+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
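# Usage sketch (illustrative; the config keys follow the constructor above and
# the values are assumptions):
# cfg = {'start_p': 0, 'start_q': 0, 'max_p': 3, 'max_q': 3, 'max_d': 2,
# 'max_order': 5, 'start_P': 0, 'start_Q': 0, 'max_P': 2, 'max_D': 1,
# 'max_Q': 2, 'm': 12, 'seasonal': 'true', 'd': 1, 'D': 1,
# 'error_action': 'ignore', 'suppress_warnings': True, 'stepwise': True}
# ea = eion_arima(cfg, 20, 'true', False)
# model, mae, rmse, mse, r2, aic, mape, valid, preds = ea.eion_arima(series)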
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
#Python sklearn & std libraries
import numpy as np
import pandas as pd
from time_series.ts_arima_eion import eion_arima
from time_series.aion_fbprophet import aion_fbprophet
from time_series.timeseriesDLUnivariate import timeseriesDLUnivariate
from time_series.timeseriesDLMultivariate import timeseriesDLMultivariate
from time_series.tsDLMultiVrtInUniVrtOut import tsDLMultiVrtInUniVrtOut
from statsmodels.tsa.vector_ar.vecm import coint_johansen
from statsmodels.tsa.vector_ar.var_model import VAR
from math import *
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from math import sqrt
import logging
import os
import sys
import time
import pickle
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
from statsmodels.tsa.stattools import adfuller
import pmdarima as pm
from statsmodels.tsa.stattools import grangercausalitytests
from statsmodels.stats.stattools import durbin_watson
from time_series.ts_modelvalidation import timeseriesModelTests
from sklearn.utils import check_array
from time_series.tsStationarySeasonalityTest import tsStationarySeasonalityTest
class timeseries():
def __init__(self,tsConfig,modelconfig,modelList,data,targetFeature,dateTimeFeature,modelName,trainPercentage,usecasename,version,deployLocation,scoreParam):
self.tsConfig = tsConfig
self.modelconfig = modelconfig
self.modelList = modelList
self.data = data
self.data1=data
self.pred_freq = ''
self.additional_regressors=''
self.trainPercentage = trainPercentage
self.targetFeature = targetFeature
self.dateTimeFeature = dateTimeFeature
self.modelName=modelName
self.usecasename=usecasename
self.model_fit=None
self.selectedColumns = ''
self.version=version
self.deployLocation=deployLocation
self.dictDiffCount={}
self.log = logging.getLogger('eion')
self.scoreParam=str(scoreParam)
try:
##For bug:12280
self.data.dropna(how='all',axis=1,inplace=True)
except Exception as e:
self.data = self.data.fillna(0)
self.log.info("Empty-feature drop failed; check whether any text column contains only empty records. If so, remove that column and re-upload the data for time series forecasting. \\n"+str(e))
def var_prediction(self,no_of_prediction):
tdata = self.data.drop([self.dateTimeFeature], axis=1)
tdata.index = self.data[self.dateTimeFeature]
lag_order = self.model_fit.k_ar
predictions = self.model_fit.forecast(tdata.values[-lag_order:],steps=no_of_prediction)
predictions = predictions.round(2)
col = self.targetFeature.split(",")
pred = pd.DataFrame(index=range(0,len(predictions)),columns=col)
for j in range(0,len(col)):
for i in range(0, len(predictions)):
pred.iloc[i][j] = predictions[i][j]
predictions = pred
pred=self.invertTransformation(tdata,self.targetFeature,predictions,self.dictDiffCount)
return pred
def save_dl_model(self,smodel,scaler_model):
try:
saved_model = self.usecasename+'_'+self.version
filename = os.path.join(self.deployLocation,'model',saved_model)
smodel.save(filename)
if scaler_model != 'NA' and scaler_model != '':
scaler_filename = os.path.join(self.deployLocation,'model',saved_model+'_scaler.pkl')
with open(scaler_filename, 'wb') as f:
pickle.dump(scaler_model,f)
f.close()
else:
scaler_filename = 'NA'
return filename,saved_model,scaler_filename
except Exception as e:
print(e)
def save_model(self,smodel):
try:
saved_model = self.usecasename+'_'+self.version+'.sav'
filename = os.path.join(self.deployLocation,'model',saved_model)
with open(filename, 'wb') as f:
pickle.dump(smodel,f)
f.close()
return filename,saved_model
except Exception as e:
print(e)
def mean_absolute_percentage_error(self,y_true, y_pred):
try:
y_true, y_pred=np.array(y_true), np.array(y_pred)
mape=np.mean(np.abs((y_true - y_pred) / (y_true + sys.float_info.epsilon))) * 100 #epsilon inside the denominator guards against division by zero
return mape
except Exception as inst:
self.log.info('------------- mean_absolute_percentage_error ---------------')
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
## Fbprophet model
def getfbprophetmodel(self,predicted_data_file,dataFolderLocation,tFeature):
try:
modelName='fbprophet'
modelconfig = self.modelconfig['fbprophet']
self.targetFeature=tFeature[0]
X_Train = pd.DataFrame(self.data[self.targetFeature])
try:
# self.data[self.dateTimeFeature] = pd.to_datetime(self.data[self.dateTimeFeature],errors='coerce')
##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle.
try:
#for non utc timestamp
self.data[self.dateTimeFeature] = pd.to_datetime(self.data[self.dateTimeFeature],errors='coerce')
except:
#for utc timestamp
self.data[self.dateTimeFeature] = pd.to_datetime(self.data[self.dateTimeFeature],errors='coerce',utc=True)
self.data = self.data.dropna()
except:
pass
aion_prophet_obj = aion_fbprophet(modelconfig,self.trainPercentage,self.data,self.targetFeature,self.dateTimeFeature)
self.log.info('Status:- |... TimeSeries Algorithm applied: FBPROPHET')
self.model_fit,mae,rmse_prophet,mse,mape,r2,pred_freq,additional_regressors,prophet_df_new = aion_prophet_obj.aion_probhet(X_Train,self.dateTimeFeature,predicted_data_file,dataFolderLocation)
## Added for additional scoring params
if (self.scoreParam.lower() == "r2"):
scoringparam_v=r2
self.log.info("fbprophet User selected scoring parameter is r2. r2 value: "+str(r2))
elif (self.scoreParam.lower() == "rmse"):
scoringparam_v=rmse_prophet
self.log.info("fbprophet User selected scoring parameter is RMSE. RMSE value: "+str(rmse_prophet))
elif (self.scoreParam.lower() == "mse"):
scoringparam_v=mse
self.log.info("fbprophet User selected scoring parameter is MSE. MSE value: "+str(mse))
elif (self.scoreParam.lower() == "mae"):
scoringparam_v=mae
self.log.info("fbprophet User selected scoring parameter is MAE. MAE value: "+str(mae))
else:
scoringparam_v=rmse_prophet
self.log.info('Status:- |... Score '+self.scoreParam.capitalize()+': '+str(round(scoringparam_v,2))) #task 11997 displaying user selected scoring parameter in status logs
error_matrix = '"RMSE":"'+str(round(rmse_prophet,2))+'","MAPE":"'+str(round(mape,2))+'","R2":"'+str(round(r2,2))+'","MAE":"'+str(round(mae,2))+'","MSE":"'+str(round(mse,2))+'"'
self.log.info("fbprophet all scoring parameter results: "+str(error_matrix))
scoredetails = '{"Model":"FBProphet ","Score":'+str(scoringparam_v)+',"Scoring Param": "'+str(self.scoreParam.lower())+'"}'
self.selectedColumns = self.targetFeature+','+self.dateTimeFeature
self.selectedColumns = self.selectedColumns.split(",")
self.pred_freq = pred_freq
self.additional_regressors=additional_regressors
self.log.info('------------- End FBPROPHET Model -------------\\n')
return('Success',modelName.upper(),self.scoreParam.lower(),scoringparam_v,self.model_fit,self.selectedColumns,error_matrix,scoredetails,self.dictDiffCount,self.pred_freq,self.additional_regressors,prophet_df_new)
except Exception as e:
self.log.info("FBProphet operation failed. error: "+str(e))
return('Error',modelName.upper(),self.scoreParam.lower(),0,None,self.selectedColumns,'','{}',self.dictDiffCount,self.pred_freq,self.additional_regressors,pd.DataFrame())
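## The error_matrix/scoredetails strings above are assembled by manual string
## concatenation; the hedged sketch below shows an equivalent construction with
## json.dumps (metric values here are hypothetical):
'''
import json
metrics = {"RMSE": 1.23, "MAPE": 4.56, "R2": 0.89, "MAE": 0.98, "MSE": 1.51}
error_matrix = ','.join('"%s":"%s"' % (k, round(v, 2)) for k, v in metrics.items())
scoredetails = json.dumps({"Model": "FBProphet", "Score": metrics["RMSE"], "Scoring Param": "rmse"})
'''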
## Arima model
def get_arima_values(self):
try:
tFeature = self.targetFeature.split(',')
if(len(tFeature) == 1):
model_name = 'arima'
else:
self.log.info("Note: ARIMA model is going to perform only on first feature of provided target features due to data not met the VAR model constraints")
self.targetFeature=tFeature[0]
sesonalityChecks=True
stationaryChecks=False
#start checking seasonality using the CH test (pmdarima also supports OCSB)
self.log.info(self.data.head(5))
res = pm.arima.nsdiffs(self.data[self.targetFeature], m=365, max_D=5, test="ch") # m=365 for daily data
self.log.info('-------> Seasonality checks: %f' % res)
if res >=4:
self.log.info("-----------> Data is following Seasonality ")
self.log.info('Status:- |... Seasonality Check Done. Data is following Seasonality ')
sesonalityChecks=True
else:
self.log.info("-----------> Data is not following Seasonality ")
self.log.info('Status:- |... Seasonality Check Done. Data is not following Seasonality')
sesonalityChecks=False
# end of seasonality check
# start checking stationary data for time Series
series=self.data[self.targetFeature]
adf_test = pm.arima.ADFTest(alpha=0.05)
resultSt = adfuller(self.data[self.targetFeature])
self.log.info('ADF Statistic: %f' % resultSt[0])
self.log.info('p-value: %f' % resultSt[1])
if resultSt[1]<= 0.05:
stationaryChecks=True
self.log.info("the data does not have a unit root and is stationary.")
self.log.info('Status:- |... Stationary Check Done. Data is stationary')
else:
stationaryChecks=False
self.log.info("the data has a unit root and is non-stationary.")
self.log.info('Status:- |... Stationary Check Done. Data is non-stationary')
# End of stationary checks
self.log.info('\\n------------- Start Arima Model -------------')
self.log.info('-------> Top 5 Rows: ')
self.log.info(self.data.head(5))
eion_arima_obj = eion_arima(self.modelconfig['arima'],self.trainPercentage,sesonalityChecks,stationaryChecks)
return 'Success',eion_arima_obj
except Exception as e:
self.log.info('<!------------- Get ARIMA Values Error ---------------> '+str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
return 'Error',None
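## Standalone sketch of the ADF decision rule used above, on synthetic data
## (a random walk, which should come out non-stationary); assumes only numpy
## and statsmodels are available:
'''
import numpy as np
from statsmodels.tsa.stattools import adfuller
rng = np.random.default_rng(0)
series = rng.normal(size=200).cumsum()
stat, p_value = adfuller(series)[:2]
stationary = p_value <= 0.05 # same 0.05 threshold as above
'''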
def getEncDecLSTMMultVrtInUniVrtOut(self):
try:
self.log.info('Status:- |... TimeSeries Algorithm applied: Encoder Decoder LSTM')
modelName='encoder_decoder_lstm_mvi_uvo'
modelconfig = self.modelconfig['encoder_decoder_lstm_mvi_uvo']
df = self.data
targetFeature = list(self.targetFeature.split(","))
try:
# df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce')
##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle.
try:
#for non utc timestamp
df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce')
except:
#for utc timestamp
df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce',utc=True)
df = df.dropna()
except:
pass
df = df.groupby(self.dateTimeFeature).mean()
df = df.reset_index()
tdata = df.drop([self.dateTimeFeature], axis=1)
tdata.index = df[self.dateTimeFeature]
#tdata = tdata[tdata.columns[tdata.columns.isin(targetFeature)]]
#selectedColumns = self.targetFeature+','+self.dateTimeFeature
#selectedColumns = selectedColumns.split(",")
selectedColumns = tdata.columns
df_predicted=None
aion_dlts_obj = tsDLMultiVrtInUniVrtOut(modelconfig,self.trainPercentage,targetFeature,self.dateTimeFeature)
status,mse,rmse,r2,mae,model,df_predicted,lag_order,scaler = aion_dlts_obj.lstm_encdec_mvin_uvout(tdata)
if status.lower() == 'success':
## Added for additional scoring params
if (self.scoreParam.lower() == "r2"):
scoringparam_v=r2
self.log.info('Status:- |... Score R2(Avg) '+str(r2))
elif (self.scoreParam.lower() == "rmse"):
scoringparam_v=rmse
self.log.info("Status:- |... Score RMSE(Avg) "+str(rmse))
elif (self.scoreParam.lower() == "mse"):
scoringparam_v=mse
self.log.info("Status:- |... Score MSE(Avg) "+str(mse))
elif (self.scoreParam.lower() == "mae"):
scoringparam_v=mae
self.log.info("Status:- |... Score MAE(Avg) : "+str(mae))
else:
scoringparam_v=rmse
error_matrix = '"RMSE":"'+str(round(rmse,2))+'","MSE":"'+str(round(mse,2))+'"'
error_matrix=error_matrix+',"R2":"'+str(round(r2,2))+'","MAE":"'+str(round(mae,2))+'"'
self.log.info("LSTM Multivariant Input Univariate Output all scoring param results: "+str(error_matrix))
self.log.info('Status:- |... Score '+self.scoreParam.capitalize()+': '+str(round(scoringparam_v,2))) #task 11997 displaying user selected scoring parameter in status logs
scoredetails = '{"Model":"LSTM Multivariant","Score":'+str(scoringparam_v)+',"Scoring Param": "'+str(self.scoreParam.lower())+'"}'
else:
return 'Error',modelName.upper(),self.scoreParam.lower(),'NA',None,selectedColumns,'','{}',pd.DataFrame(),lag_order,None
except Exception as e:
self.log.info("getEncDecLSTMMultVrtInUniVrtOut method error. Error msg: "+str(e))
return 'Error',modelName.upper(),self.scoreParam.lower(),'NA',None,selectedColumns,'','{}',pd.DataFrame(),lag_order,None
return 'Success',modelName.upper(),self.scoreParam.lower(),scoringparam_v,model,selectedColumns,error_matrix,scoredetails,df_predicted,lag_order,scaler
def getLSTMMultivariate(self):
try:
self.log.info('Status:- |... TimeSeries Algorithm applied: LSTM')
modelName='lstm'
modelconfig = self.modelconfig['lstm']
df = self.data
targetFeature = list(self.targetFeature.split(","))
try:
# df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce')
##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle.
try:
#for non utc timestamp
df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce')
except:
#for utc timestamp
df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce',utc=True)
df = df.dropna()
except:
pass
df = df.groupby(self.dateTimeFeature).mean()
df = df.reset_index()
tdata = df.drop([self.dateTimeFeature], axis=1)
tdata.index = df[self.dateTimeFeature]
tdata = tdata[tdata.columns[tdata.columns.isin(targetFeature)]]
selectedColumns = self.targetFeature+','+self.dateTimeFeature
selectedColumns = selectedColumns.split(",")
df_predicted=None
aion_dlts_obj = timeseriesDLMultivariate(modelconfig,self.trainPercentage,targetFeature,self.dateTimeFeature)
status,mse,rmse,r2,mae,model,df_predicted,lag_order,scaler = aion_dlts_obj.lstm_multivariate(tdata)
if status.lower() == 'success':
## Added for additional scoring params
if (self.scoreParam.lower() == "r2"):
scoringparam_v=r2
self.log.info('Status:- |... Score R2(Avg) '+str(r2))
elif (self.scoreParam.lower() == "rmse"):
scoringparam_v=rmse
self.log.info("Status:- |... Score RMSE(Avg) "+str(rmse))
elif (self.scoreParam.lower() == "mse"):
scoringparam_v=mse
self.log.info("Status:- |... Score MSE(Avg) "+str(mse))
elif (self.scoreParam.lower() == "mae"):
scoringparam_v=mae
self.log.info("Status:- |... Score MAE(Avg) : "+str(mae))
else:
scoringparam_v=rmse
error_matrix = '"RMSE":"'+str(round(rmse,2))+'","MSE":"'+str(round(mse,2))+'"'
error_matrix=error_matrix+',"R2":"'+str(round(r2,2))+'","MAE":"'+str(round(mae,2))+'"'
self.log.info("LSTM Multivariant all scoring param results: "+str(error_matrix))
self.log.info('Status:- |... Score '+self.scoreParam.capitalize()+': '+str(round(scoringparam_v,2))) #task 11997 displaying user selected scoring parameter in status logs
scoredetails = '{"Model":"LSTM Multivariant","Score":'+str(scoringparam_v)+',"Scoring Param": "'+str(self.scoreParam.lower())+'"}'
else:
return 'Error',modelName.upper(),self.scoreParam.lower(),'NA',None,selectedColumns,'','{}',pd.DataFrame(),lag_order,None
except Exception as e:
self.log.info("getLSTMMultivariate method error. Error msg: "+str(e))
return 'Error',modelName.upper(),self.scoreParam.lower(),'NA',None,selectedColumns,'','{}',pd.DataFrame(),lag_order,None
return 'Success',modelName.upper(),self.scoreParam.lower(),scoringparam_v,model,selectedColumns,error_matrix,scoredetails,df_predicted,lag_order,scaler
def getUniVarientLSTMModel(self):
try:
self.log.info('Status:- |... TimeSeries Algorithm applied: LSTM')
modelName='lstm'
lstmconfig = self.modelconfig['lstm']
df = self.data
try:
# df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce')
##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle.
try:
#for non utc timestamp
df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce')
except:
#for utc timestamp
df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce',utc=True)
df = df.dropna()
except:
pass
tdata = df.drop([self.dateTimeFeature], axis=1)
tdata.index = df[self.dateTimeFeature]
tdata = pd.DataFrame(tdata[self.targetFeature])
selectedColumns = self.targetFeature+','+self.dateTimeFeature
selectedColumns = selectedColumns.split(",")
aion_dlts_obj = timeseriesDLUnivariate(lstmconfig,self.trainPercentage,self.targetFeature,self.dateTimeFeature,modelName)
status,lstm_mse,lstm_rmse,r2,mae,lstm_model,df_predicted_lstm,lag_order,scaler = aion_dlts_obj.ts_lstm(tdata)
if status.lower() == 'success':
## Added for additional scoring params
if (self.scoreParam.lower() == "r2"):
scoringparam_v=r2
self.log.info("LSTM Univariant User selected scoring parameter is r2. r2 value: "+str(r2))
elif (self.scoreParam.lower() == "rmse"):
scoringparam_v=lstm_rmse
self.log.info("LSTM Univariant User selected scoring parameter is RMSE. Rmse value: "+str(lstm_rmse))
elif (self.scoreParam.lower() == "mse"):
scoringparam_v=lstm_mse
self.log.info("LSTM Univariant User selected scoring parameter is MSE. Mse value: "+str(lstm_mse))
elif (self.scoreParam.lower() == "mae"):
scoringparam_v=mae
self.log.info("LSTM Univariant User selected scoring parameter is MAE. Mae value: "+str(mae))
else:
scoringparam_v=lstm_rmse
error_matrix = '"RMSE":"'+str(round(lstm_rmse,2))+'","MSE":"'+str(round(lstm_mse,2))+'"'
error_matrix=error_matrix+',"R2":"'+str(round(r2,2))+'","MAE":"'+str(round(mae,2))+'"'
self.log.info("LSTM Univariant, all scoring param results: "+str(error_matrix))
self.log.info('Status:- |... Score '+self.scoreParam.capitalize()+': '+str(round(scoringparam_v,2))) #task 11997 displaying user selected scoring parameter in status logs
scoredetails = '{"Model":"LSTM Univariant","Score":'+str(scoringparam_v)+',"Scoring Param": "'+str(self.scoreParam.lower())+'"}'
return 'Success',modelName.upper(),self.scoreParam.lower(),scoringparam_v,lstm_model,selectedColumns,error_matrix,scoredetails,df_predicted_lstm,lag_order,scaler
else:
return 'Error',modelName.upper(),self.scoreParam.lower(),0,None,selectedColumns,'','{}',pd.DataFrame(),0,None
except Exception as inst:
self.log.info('<!------------- LSTM Error ---------------> '+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
return 'Error',modelName.upper(),self.scoreParam.lower(),0,None,selectedColumns,'','{}',pd.DataFrame(),0,None
def getUniVarientMLPModel(self):
try:
self.log.info('Status:- |... TimeSeries Algorithm applied: MLP')
modelName='mlp'
lstmconfig = self.modelconfig['mlp']
df = self.data
try:
# df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce')
##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle.
try:
#for non utc timestamp
df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce')
except:
#for utc timestamp
df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature],errors='coerce',utc=True)
df = df.dropna()
except:
pass
tdata = df.drop([self.dateTimeFeature], axis=1)
tdata.index = df[self.dateTimeFeature]
tdata = pd.DataFrame(tdata[self.targetFeature])
selectedColumns = self.targetFeature+','+self.dateTimeFeature
selectedColumns = selectedColumns.split(",")
aion_dlts_obj = timeseriesDLUnivariate(lstmconfig,self.trainPercentage,self.targetFeature,self.dateTimeFeature,modelName)
mlp_mse,mlp_rmse,r2,mae,mlp_model,df_predicted_mlp,look_back,scaler = aion_dlts_obj.mlpDL(tdata)
## Added for additional scoring params
if (self.scoreParam.lower() == "r2"):
scoringparam_v=r2
self.log.info("MLP Univariant User selected scoring parameter is R2. R2 value: "+str(r2))
elif (self.scoreParam.lower() == "rmse"):
scoringparam_v=mlp_rmse
self.log.info("MLP Univariant User selected scoring parameter is RMSE. Rmse value: "+str(mlp_rmse))
elif (self.scoreParam.lower() == "mse"):
scoringparam_v=mlp_mse
self.log.info("MLP Univariant User selected scoring parameter is MSE. Mse value: "+str(mlp_mse))
elif (self.scoreParam.lower() == "mae"):
scoringparam_v=mae
self.log.info("MLP Univariant User selected scoring parameter is MAE. Mae value: "+str(mae))
else:
scoringparam_v=mlp_rmse
error_matrix = '"RMSE":"'+str(round(mlp_rmse,2))+'","MSE":"'+str(round(mlp_mse,2))+'"'
error_matrix=error_matrix+',"R2":"'+str(round(r2,2))+'","MAE":"'+str(round(mae,2))+'"'
self.log.info("MLP Univariant, all scoring param results: "+str(error_matrix))
self.log.info('Status:- |... Score '+self.scoreParam.capitalize()+': '+str(round(scoringparam_v,2))) #task 11997 displaying user selected scoring parameter in status logs
scoredetails = '{"Model":"MLP","Score":'+str(scoringparam_v)+',"Scoring Param": "'+str(self.scoreParam.lower())+'"}'
return 'Success',modelName.upper(),self.scoreParam.lower(),scoringparam_v,mlp_model,selectedColumns,error_matrix,scoredetails,df_predicted_mlp,look_back,scaler
except Exception as inst:
import traceback
self.log.info("MLP Error in timeseries module: \\n"+str(traceback.print_exc()))
self.log.info('<!------------- MLP Error ---------------> ')
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
return 'Error',modelName.upper(),self.scoreParam.lower(),0,None,selectedColumns,'','{}',pd.DataFrame(),0,None
def getARIMAmodel(self,predicted_data_file):
try:
modelName='arima'
status,eion_arima_obj = self.get_arima_values()
self.log.info('Status:- |... TimeSeries Algorithm applied: ARIMA')
selected_feature_list = self.data[self.targetFeature].values
selected_feature_list = selected_feature_list.astype('int32')
self.log.info('-------> Target Feature First 5 Rows: ')
self.log.info(self.data[self.targetFeature].head(5))
X_Train = pd.DataFrame(self.data[self.targetFeature])
try:
# self.data[self.dateTimeFeature] = pd.to_datetime(self.data[self.dateTimeFeature],errors='coerce')
##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle.
try:
#for non utc timestamp
self.data[self.dateTimeFeature] = pd.to_datetime(self.data[self.dateTimeFeature],errors='coerce')
except:
#for utc timestamp
self.data[self.dateTimeFeature] = pd.to_datetime(self.data[self.dateTimeFeature],errors='coerce',utc=True)
self.data = self.data.dropna()
except:
pass
if status.lower() == 'success':
self.model_fit,mae,rmse_arima,mse,r2,aic_score,mape,valid,pred = eion_arima_obj.eion_arima(X_Train)
## Added for additional scoring params
if (self.scoreParam.lower() == "r2"):
scoringparam_v=r2
self.log.info("ARIMA Univariant User selected scoring parameter is r2. r2 value: "+str(r2))
elif (self.scoreParam.lower() == "rmse"):
scoringparam_v=rmse_arima
self.log.info("ARIMA Univariant User selected scoring parameter is RMSE. RMSE value: "+str(rmse_arima))
elif (self.scoreParam.lower() == "mse"):
scoringparam_v=mse
self.log.info("ARIMA Univariant User selected scoring parameter is MSE. MSE value: "+str(mse))
modelScore.append(rmse_mlp)
modelScore.append(rmse_var)
if (min(modelScore) == rmse_arima and rmse_arima != 0xFFFF):
best_model='arima'
self.log.info('Status:- |... TimeSeries Best Algorithm: ARIMA')
return best_model
elif (min(modelScore) == rmse_prophet and rmse_prophet != 0xFFFF):
best_model='fbprophet'
self.log.info('Status:- |... TimeSeries Best Algorithm: FBPROPHET')
return best_model
elif (min(modelScore) == rmse_lstm and rmse_lstm != 0xFFFF):
best_model='lstm'
self.log.info('Status:- |... TimeSeries Best Algorithm: LSTM')
return best_model
elif (min(modelScore) == rmse_mlp and rmse_mlp != 0xFFFF):
best_model='mlp'
self.log.info('Status:- |... TimeSeries Best Algorithm: MLP')
return best_model
elif (min(modelScore) == rmse_var and rmse_var != 0xFFFF):
best_model='var'
self.log.info('Status:- |... TimeSeries Best Algorithm: VAR')
return best_model
else:
# All remaining model scores are equal, so the models are performing equally;
# selecting ARIMA as the default best model.
best_model='arima'
return best_model
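## The elif ladder above selects the minimum score while skipping the 0xFFFF
## (65535) sentinel used for models that failed or were not run; a hedged
## dict-based equivalent with hypothetical scores:
'''
scores = {'arima': 12.4, 'fbprophet': 0xFFFF, 'lstm': 9.8, 'mlp': 0xFFFF, 'var': 0xFFFF}
valid = {name: s for name, s in scores.items() if s != 0xFFFF}
best_model = min(valid, key=valid.get) if valid else 'arima' # -> 'lstm'
'''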
## Selecting best model algorithm
def bestmodelProcess(self,modelNames,nfeatures,trained_data_file,tFeature,predicted_data_file,dataFolderLocation):
try:
best_model=''
lag_order = 1
predict_var=None
predict_arima=None
predict_lstm=None
predict_mlp=None
predict_fbprophet=None
modelNames=[x.lower() for x in modelNames]
inputFeature_len=nfeatures
status = 'Success'
if 'fbprophet' in modelNames:
status,modelName_prophet,fbprophet,rmse_prophet,fp_model_fit,selectedColumns_prophet,error_matrix_prophet,scoredetails_prophet,dictDiffCount_prophet,pred_freq_prophet,additional_regressors_prophet,predict_fbprophet = self.getfbprophetmodel(predicted_data_file,dataFolderLocation,tFeature)
if status.lower() == 'error':
self.log.info('-------------> FBPROPHET RMSE Score: Error')
if (self.scoreParam.lower() == 'r2'):
rmse_prophet = -0xFFFF
else:
rmse_prophet = 0xFFFF
else:
self.log.info("-------------> FBPROPHET RMSE Score:\\t"+str(round(rmse_prophet,2)))
else:
if (self.scoreParam.lower() == 'r2'):
rmse_prophet = -0xFFFF
else:
rmse_prophet = 0xFFFF
if 'arima' in modelNames:
status,modelName,aic,rmse_arima,ar_model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,rmse_arima_act,predict_arima = self.getARIMAmodel(predicted_data_file)
if status.lower() == 'error':
self.log.info('-------------> ARIMA RMSE Score: Error')
if (self.scoreParam.lower() == 'r2'):
rmse_arima = -0xFFFF
else:
rmse_arima = 0xFFFF
else:
self.log.info('-------------> ARIMA RMSE Score:\\t'+str(round(rmse_arima,2)))
else:
if (self.scoreParam.lower() == 'r2'):
rmse_arima = -0xFFFF ## -65535
else:
rmse_arima = 0xFFFF
if 'lstm' in modelNames:
if inputFeature_len == 1:
status,modelName_lstm,score_type,rmse_lstm,lstm_model_fit,lstm_selectedColumns,error_matrix_lstm,scoredetails_lstm,predict_lstm,lag_order,lstm_scaler = self.getUniVarientLSTMModel()
else:
status,modelName_lstm,score_type,rmse_lstm,lstm_model_fit,lstm_selectedColumns,error_matrix_lstm,scoredetails_lstm,predict_lstm,lag_order,lstm_scaler = self.getLSTMMultivariate()
if status.lower() == 'error':
self.log.info('-------------> LSTM RMSE Score: Error')
if (self.scoreParam.lower() == 'r2'):
rmse_lstm = -0xFFFF
else:
rmse_lstm = 0xFFFF
else:
self.log.info('-------------> LSTM RMSE Score:\\t'+str(round(rmse_lstm,2)))
else:
if (self.scoreParam.lower() == 'r2'):
rmse_lstm = -0xFFFF
else:
rmse_lstm = 0xFFFF
if 'mlp' in modelNames:
status,modelName_mlp,score_type,rmse_mlp,mlp_model_fit,mlp_selectedColumns,error_matrix_mlp,scoredetails_mlp,predict_mlp,lag_order,mlp_scaler = self.getUniVarientMLPModel()
if status.lower() == 'error':
self.log.info('-------------> MLP Score: Error')
if (self.scoreParam.lower() == 'r2'):
rmse_mlp = -0xFFFF
else:
rmse_mlp = 0xFFFF
else:
self.log.info('-------------> MLP RMSE Score:\\t'+str(round(rmse_mlp,2)))
else:
if (self.scoreParam.lower() == 'r2'):
rmse_mlp = -0xFFFF
else:
rmse_mlp = 0xFFFF
if 'var' in modelNames:
status,modelName_var,score_var_type,rmse_var,var_model,var_selectedColumns,error_matrix_var,scoredetails_var,predict_var,dictDiffCount,pred_freq,additional_regressors,lag_order = self.getVARmodel()
if status.lower() == 'error':
self.log.info('-------------> VAR Score: Error')
if (self.scoreParam.lower() == 'r2'):
rmse_var = -0xFFFF
else:
rmse_var = 0xFFFF
else:
self.log.info('-------------> VAR RMSE Score:\\t'+str(round(rmse_var,2)))
else:
if (self.scoreParam.lower() == 'r2'):
rmse_var = -0xFFFF
else:
rmse_var = 0xFFFF
best_model = self.getbestmodel(rmse_prophet,rmse_arima,rmse_lstm,rmse_mlp,rmse_var)
if (best_model.lower() == 'arima'):
self.log.info('Best model is ARIMA based on metric '+str(self.scoreParam.lower()))
predict_arima.to_csv(predicted_data_file)
filename,saved_model = self.save_model(ar_model_fit)
return best_model,modelName,aic,rmse_arima,ar_model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,filename,saved_model,lag_order,'NA'
elif (best_model.lower() == 'fbprophet'):
self.log.info('Best model is fbprophet based on metric '+str(self.scoreParam.lower()))
predict_fbprophet.to_csv(predicted_data_file)
filename,saved_model = self.save_model(fp_model_fit)
return best_model,modelName_prophet,fbprophet,rmse_prophet,fp_model_fit,selectedColumns_prophet,error_matrix_prophet,scoredetails_prophet,dictDiffCount_prophet,pred_freq_prophet,additional_regressors_prophet,filename,saved_model,lag_order,'NA'
elif (best_model.lower() == 'var'):
self.log.info('Best model is VAR based on metric '+str(self.scoreParam.lower()))
self.data.to_csv(trained_data_file)
predict_var.to_csv(predicted_data_file)
filename,saved_model = self.save_model(var_model)
return best_model,modelName_var,score_var_type,rmse_var,var_model,var_selectedColumns,error_matrix_var,scoredetails_var,dictDiffCount,pred_freq,additional_regressors,filename,saved_model,lag_order,'NA'
elif (best_model.lower() == 'lstm'):
self.log.info('Best model is LSTM based on metric '+str(self.scoreParam.lower()))
predict_lstm.to_csv(predicted_data_file)
filename,saved_model,scaler_model = self.save_dl_model(lstm_model_fit,lstm_scaler)
return best_model,modelName_lstm,score_type,rmse_lstm,lstm_model_fit,lstm_selectedColumns,error_matrix_lstm,scoredetails_lstm,self.dictDiffCount,self.pred_freq,self.additional_regressors,filename,saved_model,lag_order,scaler_model
elif (best_model.lower() == 'mlp'):
self.log.info('Best model is MLP based on metric '+str(self.scoreParam.lower()))
predict_mlp.to_csv(predicted_data_file)
filename,saved_model,scaler_model = self.save_dl_model(mlp_model_fit,mlp_scaler)
return best_model,modelName_mlp,score_type,rmse_mlp,mlp_model_fit,mlp_selectedColumns,error_matrix_mlp,scoredetails_mlp,self.dictDiffCount,self.pred_freq,self.additional_regressors,filename,saved_model,lag_order,scaler_model
else:
pass
except Exception as e:
self.log.info('Issue in running the multi-model time series algorithm selection process. Please check the config params')
self.log.info('error: '+str(e))
#Method to determine seasonality and stationrity in the input data features. (Task:12622,12623)
def seasonality_stationarity_test(self):
##The below part tests stationarity and seasonality in the given time series data based on the statsmodels lib.
#self.data,self.targetFeature,self.dateTimeFeature
self.log.info("<-------------- Time series stationarity and seasonality test Started...---------------->\\n")
ts_sstest=tsStationarySeasonalityTest(self.data,self.deployLocation)
## Time series Stationary check
## Currently stationarity check method set as Augmented dickey fuller, but kpss method also implemented.
stationary_method='adfuller'
if (isinstance(self.targetFeature,list)):
target=self.targetFeature
elif (isinstance(self.targetFeature,str)):
target=list(self.targetFeature.split(','))
stats_model,n_lags,p_value,stationary_result,stationary_combined_res=ts_sstest.stationary_check(target,self.dateTimeFeature,stationary_method)
## Time series Seasonality check
##Seasonal model default set as additive
seasonal_model="additive"
df,decompose_result_mult,seasonality_result,seasonality_combined_res=ts_sstest.seasonal_check(target,self.dateTimeFeature,seasonal_model)
self.log.info("<-------------- Time series stationarity and seasonality test completed.---------------->\\n")
return stationary_result,seasonality_result
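## Minimal standalone version of the two checks delegated to
## tsStationarySeasonalityTest (synthetic weekly-seasonal data; the function
## names are statsmodels', the rest is illustrative):
'''
import numpy as np, pandas as pd
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose
idx = pd.date_range('2021-01-01', periods=120, freq='D')
y = pd.Series(10 + np.sin(np.arange(120) * 2 * np.pi / 7), index=idx)
p_value = adfuller(y)[1] # stationarity via Augmented Dickey-Fuller
decomp = seasonal_decompose(y, model='additive', period=7) # seasonality
'''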
#Main timeseries function.
def timeseries_learning(self,trained_data_file,predicted_data_file,dataFolderLocation):
lag_order = 1
##The below part tests stationarity and seasonality in the given time series data based on the statsmodels lib.
stationary_result,seasonality_result=self.seasonality_stationarity_test()
try :
tFeature = self.targetFeature.split(',')
lentFeature=len(tFeature)
try:
if lentFeature > 1:
if any('timeseriesforecasting' in x.lower() for x in self.modelName): #task 11997
self.modelName.remove('timeseriesforecasting')
if 'arima' in self.modelName:
self.log.info('Status:- |... TimeSeries algorithm ARIMA not supported for multiple features')
self.modelName.remove('arima')
if 'fbprophet' in self.modelName:
self.log.info('Status:- |... TimeSeries algorithm FBPROPHET not supported for multiple features')
self.modelName.remove('fbprophet')
if 'mlp' in self.modelName:
self.log.info('Status:- |... TimeSeries algorithm MLP not supported for multiple features')
self.modelName.remove('mlp')
if len(self.modelName) == 0:
self.log.info('--------> Default Set to VAR')
self.modelName.append('var')
if lentFeature == 1:
if any('timeseriesforecasting' in x.lower() for x in self.modelName): #task 11997
self.modelName.remove('timeseriesforecasting')
if 'var' in self.modelName:
self.log.info('Status:- |... TimeSeries algorithm VAR not supported for single feature')
self.modelName.remove('var')
if len(self.modelName) == 0:
self.log.info('--------> Default Set to ARIMA,FBProphet')
self.modelName.append('arima')
except Exception as e:
self.log.info('input model name error: '+ str(e))
self.log.info("error in user selected model, may be wrong configuration, please check.")
if (len(self.modelName) > 1):
try:
self.log.info('User selected models: '+str(self.modelName))
best_model,modelName,score_type,score,model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,filename,saved_model,lag_order,scaler_transformation = self.bestmodelProcess(self.modelName,lentFeature,trained_data_file,tFeature,predicted_data_file,dataFolderLocation)
return best_model,modelName,score_type,score,model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,filename,saved_model,lag_order,scaler_transformation
except Exception as e:
self.log.info('multi model timeseries processing error '+str(e))
else:
self.modelName = self.modelName[0]
## Normal arima ,var or fbprophet model call (user selects only one model at a time)
if self.modelName.lower() == 'fbprophet':
try:
model_name='fbprophet'
status,modelName,fbprophet,rmse_prophet,fp_model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,predict_output = self.getfbprophetmodel(predicted_data_file,dataFolderLocation,tFeature)
if status.lower() == 'success':
predict_output.to_csv(predicted_data_file)
filename,saved_model = self.save_model(fp_model_fit)
return self.modelName,modelName,fbprophet,rmse_prophet,fp_model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,filename,saved_model,lag_order,'NA'
else:
raise Exception('Exception during model training')
except Exception as e:
self.log.info('fbprophet error: '+str(e))
elif self.modelName.lower() == 'encoder_decoder_lstm_mvi_uvo':
try:
status,modelName_lstm,score_type,rmse_lstm,lstm_model_fit,lstm_selectedColumns,error_matrix_lstm,scoredetails_lstm,predict_lstm,lag_order,lstm_scaler = self.getEncDecLSTMMultVrtInUniVrtOut()
if status.lower() == 'success':
predict_lstm.to_csv(predicted_data_file)
filename,saved_model,scaler_model = self.save_dl_model(lstm_model_fit,lstm_scaler)
return self.modelName,modelName_lstm,score_type,rmse_lstm,lstm_model_fit,lstm_selectedColumns,error_matrix_lstm,scoredetails_lstm,self.dictDiffCount,self.pred_freq,self.additional_regressors,filename,saved_model,lag_order,scaler_model
else:
raise Exception('Exception during model training')
except Exception as inst:
self.log.info('<!------------- LSTM Error ---------------> ')
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
elif self.modelName.lower() == 'lstm':
try:
if lentFeature == 1:
status,modelName_lstm,score_type,rmse_lstm,lstm_model_fit,lstm_selectedColumns,error_matrix_lstm,scoredetails_lstm,predict_lstm,lag_order,lstm_scaler = self.getUniVarientLSTMModel()
else:
status,modelName_lstm,score_type,rmse_lstm,lstm_model_fit,lstm_selectedColumns,error_matrix_lstm,scoredetails_lstm,predict_lstm,lag_order,lstm_scaler = self.getLSTMMultivariate()
if status.lower() == 'success':
predict_lstm.to_csv(predicted_data_file)
filename,saved_model,scaler_model = self.save_dl_model(lstm_model_fit,lstm_scaler)
return self.modelName,modelName_lstm,score_type,rmse_lstm,lstm_model_fit,lstm_selectedColumns,error_matrix_lstm,scoredetails_lstm,self.dictDiffCount,self.pred_freq,self.additional_regressors,filename,saved_model,lag_order,scaler_model
else:
raise Exception('Exception during model training')
except Exception as inst:
self.log.info('<!------------- LSTM Error ---------------> ')
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
elif self.modelName.lower() == 'mlp':
try:
status,modelName_mlp,score_type,rmse_mlp,mlp_model_fit,mlp_selectedColumns,error_matrix_mlp,scoredetails_mlp,predict_mlp,lag_order,mlp_scaler = self.getUniVarientMLPModel()
if status.lower() == 'success':
predict_mlp.to_csv(predicted_data_file)
filename,saved_model,scaler_model = self.save_dl_model(mlp_model_fit,mlp_scaler)
return self.modelName,modelName_mlp,score_type,rmse_mlp,mlp_model_fit,mlp_selectedColumns,error_matrix_mlp,scoredetails_mlp,self.dictDiffCount,self.pred_freq,self.additional_regressors,filename,saved_model,lag_order,scaler_model
else:
raise Exception('Exception during model training')
except Exception as inst:
self.log.info('<!------------- MLP Error ---------------> ')
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
else:
#task 12627 time series profiler removed
if lentFeature>1:
self.modelName='var'
self.data.to_csv(trained_data_file)
else:
self.modelName='arima'
if self.modelName.lower()=='var':
tsModelTestObj=timeseriesModelTests(self.data,self.targetFeature,self.dateTimeFeature,0)
self.data,self.dictDiffCount=tsModelTestObj.StatinaryChecks(self.dictDiffCount)
#self.log.info('Status:- |... Stationary Check Done.')
gtestResults,countVariables=tsModelTestObj.grangersCausationMatrix(self.data,tFeature)
if countVariables >= (lentFeature*lentFeature)-(lentFeature) or countVariables >= ((lentFeature*lentFeature)-(lentFeature))/2: # significant Granger-causation count meets the full or half off-diagonal threshold
coIntegrationVectors=tsModelTestObj.coIntegrationTest(self.data)
if coIntegrationVectors<=lentFeature:
self.log.info("There are statistically significant relationship in data ")
self.log.info('Status:- |... Statistically Check Done. Statistically significant relations')
else:
self.log.info("There are no statistically significant relationship in data")
self.log.info('Status:- |... Statistically Check Done. No statistically significant relations')
else:
self.modelName='arima'
if self.modelName.lower()=='var':
try:
self.log.info('ARIMA and FBProphet cannot be applied because the input data contains more than one feature; only VAR can be applied, so AION is applying VAR \\n')
status,modelName,aic,aic_score,model_fit,selectedColumns,error_matrix,scoredetails,predict_var,dictDiffCount,pred_freq,additional_regressors,lag_order = self.getVARmodel()
if status.lower() == 'success':
filename,saved_model = self.save_model(model_fit)
predict_var.to_csv(predicted_data_file)
return self.modelName,modelName,aic,aic_score,model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,filename,saved_model,lag_order,'NA'
else:
raise Exception('Exception during VAR model training')
except Exception as inst:
self.log.info('<!------------- Var model Error ---------------> ')
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
if self.modelName.lower() == 'arima':
try:
status,modelName,aic,scoringparam_v,model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,rmse_arima_act,predict_output = self.getARIMAmodel(predicted_data_file)
if status.lower() == 'success':
predict_output.to_csv(predicted_data_file)
filename,saved_model = self.save_model(model_fit)
lag_order=0
return self.modelName,modelName,aic,scoringparam_v,model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,filename,saved_model,lag_order,'NA'
else:
raise Exception('Exception during model training')
except Exception as inst:
self.log.info('<!------------- Arima Error ---------------> '+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
except Exception as inst:
self.log.info('<!------------- TimeSeries Learning Error ---------------> '+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
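## Hedged usage sketch of the 15-element tuple returned above, assuming `ts`
## is an already initialized instance of this class (paths are hypothetical):
'''
(best_model, model_name, score_type, score, model_fit, selected_cols,
error_matrix, scoredetails, dict_diff, pred_freq, regressors,
filename, saved_model, lag_order, scaler) = ts.timeseries_learning(
'trained.csv', 'predicted.csv', 'data_folder')
'''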
def invertTransformation(self,Xtrain,targetFeature, preddf,dictDiffCount):
try:
dfforecast = preddf.copy()
self.log.info(dfforecast.head(5))
columns =targetFeature.split(",")
self.log.info(columns)
self.log.info(dictDiffCount)
for col in columns:
if col in dictDiffCount:
if dictDiffCount[col]==2:
# Roll back 2nd difference
dfforecast[col] = (Xtrain[col].iloc[-1]-Xtrain[col].iloc[-2]) + dfforecast[col].cumsum()
# Roll back 1st difference
dfforecast[col] = Xtrain[col].iloc[-1] + dfforecast[col].cumsum()
return dfforecast
except Exception as inst:
self.log.info('<!------------- invertTransformation Error ---------------> '+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pandas as pd
# import os
import tensorflow as tf
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import math
from sklearn.metrics import mean_squared_error
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras import Sequential
from tensorflow.keras.layers import LSTM
import logging
# import kerastuner
import keras_tuner
#from keras_tuner.engine.hyperparameters import HyperParameters
from keras_tuner.tuners import RandomSearch,BayesianOptimization ,Hyperband
import warnings
warnings.simplefilter("ignore", UserWarning)
# from keras.models import load_model
# from tensorflow.keras.optimizers import SGD
# from tensorflow.keras.utils import load_model
from tensorflow.keras.models import load_model
class timeseriesDLUnivariate:
def __init__(self,configfile,testpercentage,targetFeature,dateTimeFeature,modelName):
self.look_back=None
#Preprocessed dataframe
# self.df=df
self.savedmodelname=None
self.deploy_location=None
self.epochs=None
self.batch_size=None
self.hidden_layers=None
self.optimizer=None
self.activation_fn=None
self.loss_fn=None
self.first_layer=None
self.dropout=None
self.model_name=None
self.hpt_train=None
##Below is model type (MLP or lstm)
self.model_type=modelName
#self.dataFolderLocation=str(dataFolderLocation)
##Added for ts hpt
self.tuner_algorithm=""
self.dl_params = configfile
# self.data=data
self.targetFeature=targetFeature
self.dateTimeFeature=dateTimeFeature
self.testpercentage = testpercentage
self.log = logging.getLogger('eion')
#To extract dict key,values
def extract_params(self,dict):
self.dict=dict
for k,v in self.dict.items():
return k,v
##Get deep learning model hyperparameter from advanced config
def getdlparams(self):
val=self.dl_params
self.log.info('-------> The given mlp/lstm timeseries algorithm parameters:>>')
self.log.info(" "+str(val))
for k,v in val.items():
try:
if (k == "tuner_algorithm"):
self.tuner_algorithm=str(v)
elif (k == "activation"):
self.activation_fn=str(v)
elif (k == "optimizer"):
self.optimizer=str(v)
elif (k == "loss"):
self.loss_fn=str(v)
elif (k == "first_layer"):
if not isinstance(k,list):
self.first_layer=str(v).split(',')
else:
self.first_layer=k
elif (k == "lag_order"):
if isinstance(k,list):
k = ''.join(v)
k=int(float(str(v)))
else:
self.look_back=int(float(str(v)))
elif (k == "hidden_layers"):
self.hidden_layers=int(v)
elif (k == "dropout"):
if not isinstance(k,list):
self.dropout=str(v).split(',')
else:
self.dropout=k
elif (k == "batch_size"):
self.batch_size=int(v)
elif (k == "epochs"):
self.epochs=int(v)
elif (k == "model_name"):
self.model_name=str(v)
except Exception as e:
self.log.info('Exception occurred while reading deep learning params, setting up default params.')
self.activation_fn="relu"
self.optimizer="adam"
self.loss_fn="mean_squared_error"
self.first_layer=[8,512]
self.hidden_layers=1
self.look_back=int(2)
self.dropout=[0.1,0.5]
self.batch_size=2
self.epochs=50
self.model_name="lstmmodel.h5"
continue
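## Illustrative shape of the config dict consumed by getdlparams above; the
## keys match the parser, the values are assumptions, not shipped defaults:
'''
example_dl_params = {
"tuner_algorithm": "RandomSearch",
"activation": "relu",
"optimizer": "adam",
"loss": "mean_squared_error",
"first_layer": "8,512", # parsed into a [min, max] search range
"lag_order": "2",
"hidden_layers": 1,
"dropout": "0.1,0.5", # parsed into a [min, max] search range
"batch_size": 2,
"epochs": 50,
"model_name": "lstmmodel.h5",
}
'''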
## Use this helper only if a dataframe needs to be created from the raw input data.
def createdf(self,df):
target=""
# splitting reframed to X and Y considering the first column to be out target featureX=reframed.drop(['var1(t)'],axis=1)
X=df.drop([target],axis=1)
Y=df[target]
X_values=X.values
Y_values=Y.values
n_predict=len(Y_values)
train_X,train_Y = X_values[:(X_values.shape[0]-n_predict),:],Y_values[:(X_values.shape[0]-n_predict)]
test_X,test_Y = X_values[(X_values.shape[0]-n_predict):,:],Y_values[(X_values.shape[0]-n_predict):]
#reshaping train and test to feed to LSTM
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
return train_X,train_Y,test_X,test_Y
# convert an array of values into a dataset matrix
def numpydf(self,dataset, look_back):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
# x,y=numpy.array(dataX), numpy.array(dataY)
return np.array(dataX), np.array(dataY)
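## Worked example of the windowing above (toy values):
'''
import numpy as np
dataset = np.array([[1.], [2.], [3.], [4.], [5.]])
# numpydf(dataset, look_back=2) yields X=[[1,2],[2,3]], y=[3,4];
# the final window [3,4]->5 is dropped by the len(dataset)-look_back-1 bound.
'''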
def model_save(self,model):
import os.path
savedmodelname=self.model_name
path = os.path.join(self.deploy_location,savedmodelname)
model.save(path)
return (savedmodelname)
## MLP model buid
def mlpDL(self,df):
self.log.info("MLP timeseries learning starts.....")
try:
self.getdlparams()
# look_back = self.look_back
dataset = df.values
dataset = dataset.astype('float32')
##The below Kwiatkowski-Phillips-Schmidt-Shin (kpss) statsmodel lib used for stationary check as well getting number of lags.
##number of lag calculated just for reference ,not used now.
#Do not delete this; it may be needed in the future.
from statsmodels.tsa.stattools import kpss
statistic, p_value, n_lags, critical_values = kpss(df[self.targetFeature])
self.log.info("Based on kpss statsmodel, lag order (time steps to calculate next prediction) is: \\t"+str(n_lags))
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
# split into train and test sets
train_size = int(len(dataset) * 0.80)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
self.hpt_train=train
tuner_alg=self.tuner_algorithm
try:
## Remove untitled_project dir in AION root folder created by previous tuner search run
import shutil
shutil.rmtree(r".\\untitled_project")
except:
pass
if (tuner_alg.lower()=="randomsearch"):
tuner=RandomSearch(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3)
elif (tuner_alg.lower()=="bayesianoptimization"):
tuner=BayesianOptimization(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3)
elif (tuner_alg.lower()=="hyperband"):
tuner=Hyperband(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_epochs=50,factor=3)
# tuner.search(X[...,np.new_axis],y,epochs=2,validation_data=(y[...,np.newaxis]))
stop_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)
try:
tuner.search(x=train,y=train,validation_data=(test,test),callbacks=[stop_early])
except:
tuner.search(x=train,y=train,validation_split=0.2,callbacks=[stop_early])
# best_model=tuner.get_best_models(num_models=1)[0]
best_hps=tuner.get_best_hyperparameters(num_trials=1)[0]
best_first_layer=best_hps.get('units')
best_dropout=best_hps.get('Dropout_rate')
best_learning_rate=float(best_hps.get('learning_rate'))
self.log.info("best hyperparameter values for mlp: \\n"+str(best_hps.values))
look_back = 1 ## Because univariate problemtype
trainX, trainY = self.numpydf(train, look_back)
testX, testY = self.numpydf(test, look_back)
best_hmodel=tuner.hypermodel.build(best_hps)
##Added for mlp issue,because tuner build also need to compile.
try:
best_hmodel.compile(loss=self.loss_fn, optimizer=self.optimizer)
except:
pass
model_fit = best_hmodel.fit(trainX, trainY, epochs=self.epochs, batch_size=self.batch_size, verbose=2)
loss_per_epoch = model_fit.history['loss'] # training loss per epoch
best_epoch = loss_per_epoch.index(min(loss_per_epoch)) + 1
self.log.info("MLP best epochs value:\\n"+str(best_epoch))
trainScore = best_hmodel.evaluate(trainX, trainY, verbose=0)
testScore = best_hmodel.evaluate(testX, testY, verbose=0)
#Scoring values for the model
mse_eval=testScore
try:
#If mse_eval is list of values
min_v=min(mse_eval)
except:
#If mse_eval is single value
min_v=mse_eval
rmse_eval = math.sqrt(min_v)
# generate predictions for training
trainPredict = best_hmodel.predict(trainX)
#print(testX)
testPredict = best_hmodel.predict(testX)
#print(testPredict)
# invert predictions back to the original scale, because we used a MinMax scaler
trainY = scaler.inverse_transform([trainY])
trainPredict = scaler.inverse_transform(trainPredict)
## For test data
testY = scaler.inverse_transform([testY])
testPredict = scaler.inverse_transform(testPredict)
## Creating dataframe for actual,predictions
predictions = pd.DataFrame(testPredict, columns=[self.targetFeature+'_pred'])
actual = pd.DataFrame(testY.T, columns=[self.targetFeature+'_actual'])
df_predicted=pd.concat([actual,predictions],axis=1)
#print(df_predicted)
from math import sqrt
from sklearn.metrics import mean_squared_error
try:
mse_mlp = mean_squared_error(testY.T,testPredict)
rmse_mlp=sqrt(mse_mlp)
self.log.info('mse_mlp: '+str(mse_mlp))
self.log.info('rmse_mlp: '+str(rmse_mlp))
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error
r2 = r2_score(testY.T,testPredict)
mae = mean_absolute_error(testY.T,testPredict)
self.log.info('r2_mlp: '+str(r2))
self.log.info('mae_mlp: '+str(mae))
except Exception as e:
import traceback
self.log.info("MLP dataframe creation error traceback: \\n"+str(traceback.print_exc()))
self.log.info(e)
# df_predicted.to_csv('mlp_prediction.csv')
except Exception as e:
self.log.info("MLP timeseries model traceback error msg e: "+str(e))
# Re-raise so the caller handles the failure; the names in the return
# statement below would otherwise be undefined after an exception.
raise
self.log.info("MLP training successfully completed.\\n")
return mse_mlp,rmse_mlp,r2,mae,best_hmodel,df_predicted,look_back,scaler
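## The inverse_transform calls above map scaled predictions back to the
## original units; a self-contained round-trip sketch:
'''
import numpy as np
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(np.array([[10.], [20.], [30.]])) # [[0.],[0.5],[1.]]
restored = scaler.inverse_transform(scaled) # back to [[10.],[20.],[30.]]
'''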
## Added function for hyperparam tuning (TFSTask:7033)
def build_model(self,hp):
try:
loss=self.loss_fn
optimizer=self.optimizer
try:
if optimizer.lower() == "adam":
optimizer=tf.keras.optimizers.Adam
elif(optimizer.lower() == "adadelta"):
optimizer=tf.keras.optimizers.experimental.Adadelta
elif(optimizer.lower() == "nadam"):
optimizer=tf.keras.optimizers.experimental.Nadam
elif(optimizer.lower() == "adagrad"):
optimizer=tf.keras.optimizers.experimental.Adagrad
elif(optimizer.lower() == "adamax"):
optimizer=tf.keras.optimizers.experimental.Adamax
elif(optimizer.lower() == "rmsprop"):
optimizer=tf.keras.optimizers.experimental.RMSprop
elif(optimizer.lower() == "sgd"):
optimizer=tf.keras.optimizers.experimental.SGD
else:
optimizer=tf.keras.optimizers.Adam
except:
optimizer=tf.keras.optimizers.Adam
pass
first_layer_min=round(int(self.first_layer[0]))
first_layer_max=round(int(self.first_layer[1]))
dropout_min=float(self.dropout[0])
dropout_max=float(self.dropout[1])
model=tf.keras.Sequential()
if (self.model_type.lower() == 'lstm'):
model.add(LSTM(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16),input_shape=(self.look_back,self.hpt_train.shape[1]),
activation=hp.Choice('dense_activation',values=['relu'])))
elif (self.model_type.lower() == 'mlp'):
# model.add(Dense(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16),input_dim=(hp.Int('time_steps',min_value=look_back_min,max_value=look_back_max,step=1)),
# activation='relu'))
##input_dim is 1 because mlp is for univariate.
model.add(Dense(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16),input_dim=(1),activation='relu'))
model.add(Dropout(hp.Float('Dropout_rate',min_value=dropout_min,max_value=dropout_max,step=0.1)))
model.add(Dense(units=1))
model.compile(optimizer=optimizer(hp.Choice('learning_rate',values=[1e-1,1e-2,1e-3,1e-4])),loss=loss,metrics=[loss])
except Exception as e:
import traceback
self.log.info("lstm build_model error traceback: \\n"+str(traceback.format_exc()))
model=tf.keras.Sequential() # fall back to an empty model so the return below stays defined
return model
##LSTM timeseries function call
def ts_lstm(self,df):
self.log.info("lstm network model learning starts.....\\n")
try:
self.getdlparams()
dataset = df.values
dataset = dataset.astype('float32')
##The below Kwiatkowski-Phillips-Schmidt-Shin (kpss) statsmodel lib used for stationary check as well getting number of lags.
##number of lag calculated just for reference ,not used now.
#Do not delete this; it may be needed in the future.
from statsmodels.tsa.stattools import kpss
statistic, p_value, n_lags, critical_values = kpss(df[self.targetFeature])
self.log.info("Based on kpss statsmodel, lag order (time steps to calculate next prediction) is: \\t"+str( |
n_lags))
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
# split into train and test sets
train_size = int(len(dataset) * 0.80)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
self.hpt_train=train
tuner_alg=self.tuner_algorithm
try:
## Remove untitled_project dir in AION root folder created by previous tuner search run
import shutil
shutil.rmtree(r".\\untitled_project")
except:
pass
if (tuner_alg.lower()=="randomsearch"):
tuner=RandomSearch(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3)
elif (tuner_alg.lower()=="bayesianoptimization"):
tuner=BayesianOptimization(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3)
elif (tuner_alg.lower()=="hyperband"):
tuner=Hyperband(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_epochs=50,factor=3)
# tuner.search(X[...,np.new_axis],y,epochs=2,validation_data=(y[...,np.newaxis]))
stop_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)
##Need both x and y with same dimention.
tuner.search(x=train,y=train,validation_split=0.2,callbacks=[stop_early])
# tuner.search(x=train,y=test,validation_data=(test,test),callbacks=[stop_early])
best_hps=tuner.get_best_hyperparameters(num_trials=1)[0]
best_time_steps=self.look_back
self.log.info("best lag order or lookback (time_steps) for LSTM: \\n"+str(best_time_steps))
self.log.info("best hyperparameter values for LSTM: \\n"+str(best_hps.values))
look_back = best_time_steps
trainX, trainY = self.numpydf(train, look_back)
testX, testY = self.numpydf(test, look_back)
# reshape input to be [samples, time steps, features]
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
#create and fit the LSTM network
best_hmodel=tuner.hypermodel.build(best_hps)
try:
best_hmodel.compile(loss=self.loss_fn, optimizer=self.optimizer)
except:
pass
model_fit = best_hmodel.fit(trainX, trainY, validation_split=0.2, epochs=self.epochs, batch_size=self.batch_size, verbose=2)
loss_per_epoch = model_fit.history['loss'] # training loss per epoch
best_epoch = loss_per_epoch.index(min(loss_per_epoch)) + 1
self.log.info("best epochs value:\\n"+str(best_epoch))
# best_hmodel=tuner.hypermodel.build(best_hps)
# best_hmodel.fit(x=trainX,y=trainY,validation_split=0.2,epochs=best_epoch)
##Using model_evaluate,calculate mse
# mse_eval = model.evaluate(testX, testY, verbose=0)
mse_eval = best_hmodel.evaluate(testX, testY, verbose=0)
try:
#If mse_eval is list of values
min_v=min(mse_eval)
except:
#If mse_eval is single value
min_v=mse_eval
rmse_eval=math.sqrt(min_v)
# self.log.info('LSTM mse:'+str(mse_eval))
# self.log.info('LSTM rmse:'+str(rmse_eval))
# lstm time series predictions
trainPredict = best_hmodel.predict(trainX)
testPredict = best_hmodel.predict(testX)
# invert predictions back to the original scale, because we used a MinMax scaler
trainY = scaler.inverse_transform([trainY])
trainPredict = scaler.inverse_transform(trainPredict)
testY = scaler.inverse_transform([testY])
testPredict = scaler.inverse_transform(testPredict)
## Creating dataframe for actual,predictions
predictions = pd.DataFrame(testPredict, columns=[self.targetFeature+'_pred'])
actual = pd.DataFrame(testY.T, columns=[self.targetFeature+'_actual'])
df_predicted=pd.concat([actual,predictions],axis=1)
from math import sqrt
from sklearn.metrics import mean_squared_error
try:
mse_lstm=None
mse_lstm = mean_squared_error(testY.T,testPredict)
rmse_lstm=sqrt(mse_lstm)
self.log.info("mse_lstm: "+str(mse_lstm))
self.log.info("rmse_lstm: "+str(rmse_lstm))
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error
r2 = r2_score(testY.T,testPredict)
mae = mean_absolute_error(testY.T,testPredict)
self.log.info('r2_lstm: '+str(r2))
self.log.info('mae_lstm: '+str(mae))
except Exception as e:
self.log.info("lstm error loss fns"+str(e))
return 'Error',0,0,0,0,None,pd.DataFrame(),0,None
except Exception as e:
import traceback
self.log.info("lstm training error traceback: \\n"+str(traceback.print_exc()))
return 'Error',0,0,0,0,None,pd.DataFrame(),0,None
return 'Success',mse_lstm,rmse_lstm,r2,mae,best_hmodel,df_predicted,look_back,scaler
if __name__ == '__main__':
print('Inside timeseriesDLUnivariate main....\\n')
# tsdl_obj = timeseriesDLUnivariate()
## for testing purpose
'''
df1= pd.read_csv(r"C:\\aiontest\\testPrograms\\Data\\energydemand.csv",encoding='utf-8', engine='python')
dateTimeFeature = "utcTimeStamp"
targetFeature="temperature"
try:
df1[dateTimeFeature] = pd.to_datetime(df1[dateTimeFeature]) #, format = '%d/%m/%Y %H.%M')
except:
pass
tdata = df1.drop([dateTimeFeature], axis=1)
tdata.index = df1[dateTimeFeature]
tdata = pd.DataFrame(tdata[targetFeature])
cols = tdata.columns
mse,rmse,model = tsdl_obj.mlpDL(tdata)
lmse,lrmse,lstmmodel = tsdl_obj.ts_lstm(tdata)
print("mlp mse: \\n",mse)
print("mlp rmse: \\n",rmse)
print("lstm mse: \\n",lmse)
print("lstm rmse: \\n",lrmse)
savedmodelname=tsdl_obj.model_save(lstmmodel)
'''
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import sys
import pandas as pd
import numpy as np
import numpy
import pandas
import math
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Input, Dense, TimeDistributed, LSTM, Dropout, RepeatVector
from sklearn.preprocessing import MinMaxScaler
import logging
import tensorflow as tf
import keras_tuner
#from keras_tuner.engine.hyperparameters import HyperParameters
from keras_tuner.tuners import RandomSearch,BayesianOptimization ,Hyperband
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
import warnings
warnings.simplefilter("ignore", UserWarning)
from sklearn.metrics import mean_absolute_percentage_error
class tsDLMultiVrtInUniVrtOut:
def __init__(self,configfile,testpercentage,targetFeature,dateTimeFeature):
self.look_back=None
self.look_forward=None
# self.df=df
self.epochs=None
self.batch_size=None
self.hidden_layers=None
self.optimizer=None
self.activation_fn="relu"
self.loss_fn=None
self.first_layer=None
self.dropout=None
self.model_name=None
self.dl_params = configfile
# self.data=data
self.targetFeature=targetFeature
self.dateTimeFeature=dateTimeFeature
self.testpercentage = float(testpercentage)
self.log = logging.getLogger('eion')
##Added for ts hpt (TFSTask:7033)
self.tuner_algorithm=""
self.num_features=0
##Get deep learning model hyperparameter from advanced config
def getdlparams(self):
val=self.dl_params
self.log.info('-------> The given mlp/lstm timeseries algorithm parameters:>>')
self.log.info(" "+str(val))
for k,v in val.items():
try:
if (k == "tuner_algorithm"):
self.tuner_algorithm=str(v)
elif (k == "activation"):
if not isinstance(k,list):
self.activation_fn=str(v).split(',')
else:
self.activation_fn=k
elif (k == "optimizer"):
self.optimizer=str(v)
elif (k == "loss"):
self.loss_fn=str(v)
elif (k == "first_layer"):
if not isinstance(k,list):
self.first_layer=str(v).split(',')
else:
self.first_layer=k
elif (k == "lag_order"):
if isinstance(k,list):
k = ''.join(v)
k=int(float(str(v)))
else:
self.look_back=int(float(str(v)))
elif (k == "forward_order"):
if isinstance(k,list):
k = ''.join(v)
k=int(float(str(v)))
else:
self.look_forward=int(float(str(v)))
elif (k == "hidden_layers"):
self.hidden_layers=int(v)
elif (k == "dropout"):
if not isinstance(k,list):
self.dropout=str(v).split(',')
else:
self.dropout=k
elif (k == "batch_size"):
self.batch_size=int(v)
elif (k == "epochs"):
self.epochs=int(v)
elif (k == "model_name"):
self.model_name=str(v)
except Exception as e:
self.log.info('Exception occurred while reading deep learning params, setting up default params.')
self.activation_fn="relu"
self.optimizer="adam"
self.loss_fn="mean_squared_error"
self.first_layer=[8,512]
self.hidden_layers=1
self.look_back=int(2)
self.dropout=[0.0,0.1,0.01]
self.batch_size=2
self.epochs=50
self.model_name="lstmmodel.h5"
continue
# Reshape the data to the required input shape of the LSTM model
def create_dataset(self,series, n_past, n_future, targetcolindx):
X, y = list(), list()
for window_start in range(len(series)):
past_end = window_start + n_past
future_end = past_end + n_future
if future_end > len(series):
break
# slicing the past and future parts of the window
past, future = series[window_start:past_end, :], series[past_end:future_end, targetcolindx]
X.append(past)
y.append(future)
return np.array(X), np.array(y)
#return X, y
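## Shape check for the sliding windows built above (toy numbers, assuming
## targetcolindx=[0]):
'''
import numpy as np
series = np.arange(20, dtype=float).reshape(10, 2) # 10 steps, 2 features
# create_dataset(series, n_past=3, n_future=2, targetcolindx=[0]) gives
# X.shape == (6, 3, 2) past windows and y.shape == (6, 2, 1) future targets.
'''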
## Added function for hyperparam tuning (TFSTask:7033)
def build_model(self,hp):
n_features = self.num_features
try:
loss=self.loss_fn
optimizer=self.optimizer
# self.getdlparams()
try:
if optimizer.lower() == "adam":
optimizer=tensorflow.keras.optimizers.Adam
elif(optimizer.lower() == "adadelta"):
optimizer=tensorflow.keras.optimizers.experimental.Adadelta
elif(optimizer.lower() == "nadam"):
optimizer=tensorflow.keras.optimizers.experimental.Nadam
elif(optimizer.lower() == "adagrad"):
optimizer=tensorflow.keras.optimizers.experimental.Adagrad
elif(optimizer.lower() == "adamax"):
optimizer=tensorflow.keras.optimizers.experimental.Adamax
elif(optimizer.lower() == "rmsprop"):
optimizer=tensorflow.keras.optimizers.experimental.RMSprop
elif(optimizer.lower() == "sgd"):
optimizer=tensorflow.keras.optimizers.experimental.SGD
else:
optimizer=tensorflow.keras.optimizers.Adam
except:
optimizer=tf.keras.optimizers.Adam
pass
# look_back_min=int(self.look_back[0])
# look_back_max=int(self.look_back[1])
first_layer_min=round(int(self.first_layer[0]))
first_layer_max=round(int(self.first_layer[1]))
dropout_min=float(self.dropout[0])
dropout_max=float(self.dropout[1])
dropout_step=float(self.dropout[2])
n_past= self.look_back
n_future = self.look_forward if self.look_forward else self.look_back
encoder_l = {}
encoder_outputs = {}
encoder_states = {}
decoder_l = {}
decoder_outputs = {}
encoder_inputs = Input(shape=(n_past, n_features))
try:
if(self.hidden_layers > 0):
encoder_l[0] = LSTM(units=hp.Int('enc_input_unit',min_value=first_layer_min,max_value=first_layer_max,step=32), activation = hp.Choice(f'enc_input_activation', values = self.activation_fn), return_sequences = True, return_state=True)
else:
encoder_l[0] = LSTM(units=hp.Int('enc_input_unit',min_value=first_layer_min,max_value=first_layer_max,step=32), activation = hp.Choice(f'enc_input_activation', values = self.activation_fn), return_state=True)
except Exception as e:
import traceback
self.log.info("lstm build traceback: \\n"+traceback.format_exc())
model=tf.keras.Sequential()
return model
encoder_outputs[0] = encoder_l[0](encoder_inputs)
encoder_states[0] = encoder_outputs[0][1:]
if(self.hidden_layers > 0):
for indx in range(self.hidden_layers):
lindx = indx + 1
if lindx == self.hidden_layers:
encoder_l[lindx] = LSTM(units=hp.Int(f'enc_lstm_units_{lindx}',min_value=first_layer_min,max_value=first_layer_max,step=32), dropout=hp.Float(f'enc_lstm_dropout_{lindx}',min_value=dropout_min,max_value=dropout_max,step=dropout_step), activation = hp.Choice(f'enc_lstm_activation_{lindx}', values = self.activation_fn), return_state=True)
else:
encoder_l[lindx] = LSTM(units=hp.Int(f'enc_lstm_units_{lindx}',min_value=first_layer_min,max_value=first_layer_max,step=32), dropout=hp.Float(f'enc_lstm_dropout_{lindx}',min_value=dropout_min,max_value=dropout_max,step=dropout_step), activation = hp.Choice(f'enc_lstm_activation_{lindx}', values = self.activation_fn), return_sequences = True, return_state=True)
encoder_outputs[lindx] = encoder_l[lindx](encoder_outputs[indx][0])
encoder_states[lindx] = encoder_outputs[lindx][1:]
decoder_inputs = RepeatVector(n_future)(encoder_outputs[self.hidden_layers][0])
else:
decoder_inputs = RepeatVector(n_future)(encoder_outputs[0][0])
#
## The decoder input layer is identical regardless of hidden_layers
decoder_l[0] = LSTM(encoder_states[0][0].get_shape()[1], activation = hp.Choice(f'dec_input_activation', values = self.activation_fn), return_sequences=True)(decoder_inputs,initial_state = encoder_states[0])
if(self.hidden_layers > 0):
for indx in range(self.hidden_layers):
lindx = indx + 1
decoder_l[lindx] = LSTM(encoder_states[lindx][0].get_shape()[1], activation = hp.Choice(f'dec_lstm_activation_{lindx}', values = self.activation_fn), return_sequences=True)(decoder_l[indx],initial_state = encoder_states[lindx])
decoder_outputs[0] = TimeDistributed(tf.keras.layers.Dense(decoder_l[self.hidden_layers][0].get_shape()[1], activation = hp.Choice(f'dec_output_activation_1', values = self.activation_fn)))(decoder_l[self.hidden_layers])
decoder_outputs[1] = TimeDistributed(tf.keras.layers.Dense(1))(decoder_outputs[0])
else:
# decoder_outputs[0] = TimeDistributed(tf.keras.layers.Dense(decoder_l[0][0].get_shape()[1]))(decoder_l[0])
# decoder_outputs[1] = LSTM(200, return_sequences=True)(decoder_outputs[0])
# decoder_outputs[2] = tf.keras.layers.Flatten()(decoder_outputs[1])
# decoder_outputs[3] = tf.keras.layers.Dense(1)(decoder_outputs[2])
decoder_outputs[0] = TimeDistributed(tf.keras.layers.Dense(decoder_l[0][0].get_shape()[1], activation = hp.Choice(f'dec_output_activation_1', values = self.activation_fn)))(decoder_l[0])
decoder_outputs[1] = TimeDistributed(tf.keras.layers.Dense(1))(decoder_outputs[0])
#
model = tf.keras.models.Model(encoder_inputs,decoder_outputs[1])
self.log.info(model.summary())
model.compile(optimizer=optimizer(hp.Choice('learning_rate',values=[1e-1,1e-2,1e-3,1e-4])),loss=loss,metrics=[self.loss_fn])
except Exception as e:
import traceback
self.log.info(",Hyperparam tuning build_model err msg: \\n"+ str(e))
self.log.info("Hyperparam tuning build_model err traceback: \\n"+str(traceback.print_exc()))
return model
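# Minimal usage sketch (assumes keras_tuner is imported and self.num_features,
# self.look_back, self.first_layer and self.dropout are already populated):
#   hp = keras_tuner.HyperParameters()
#   model = self.build_model(hp)  # tuners invoke this repeatedly, once per trial
# Each hp.Int/hp.Float/hp.Choice call above registers one search dimension;
# the tuner samples concrete values and rebuilds the encoder-decoder graph.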
##LSTM encoder-decoder with multivariate input and univariate output prediction function (lstm model, train, prediction, metrics)
def lstm_encdec_mvin_uvout(self,df):
try:
loss=self.loss_fn
self.getdlparams()
n_features = len(df.columns)
self.num_features=n_features
n_past= self.look_back
n_future = self.look_forward if self.look_forward else self.look_back
try:
if (type(self.targetFeature) is list):
pass
else:
self.targetFeature = list(self.targetFeature.split(","))
except:
pass
targetColIndx = []
for target in self.targetFeature:
targetColIndx.append(df.columns.get_loc(target))
#If the user has not applied any transformation, MinMax scaling is applied here
scaler=MinMaxScaler()
df_trnsf=scaler.fit_transform(df)
train_data, test_data = train_test_split(df_trnsf, test_size=0.2, shuffle=False)
tuner_alg=self.tuner_algorithm
#The below create_dataset only for getting best model and best hyperparameters
X_train, y_train = self.create_dataset(train_data, n_past, n_future, targetColIndx)
X_test, y_test = self.create_dataset(test_data, n_past, n_future, targetColIndx)
# X_train = X_train.reshape((X_train.shape[0], X_train.shape[1],n_features))
# y_train = y_train.reshape((y_train.shape[0], y_train.shape[1], 1))
self.log.info("Hyperparameter tuning algorithm is given by user (AION->Advanced configuration -> timeSeriesForecasting->LSTM): \\n"+str(tuner_alg))
try:
## Remove untitled_project dir in AION root folder created by previous tuner search run
import shutil
shutil.rmtree(r".\\untitled_project")
except:
pass
try:
if (tuner_alg.lower()=="randomsearch"):
tuner=RandomSearch(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=1,executions_per_trial=3)
elif (tuner_alg.lower()=="bayesianoptimization"):
tuner=BayesianOptimization(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3)
elif (tuner_alg.lower()=="hyperband"):
tuner=Hyperband(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_epochs=50,factor=3)
else:
self.log.info("The given alg is not implemented. Using default hyperparam tuning algorithm: RandomSearch.\\n")
tuner=RandomSearch(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3)
from keras.callbacks import EarlyStopping
stop_early = EarlyStopping(monitor='val_loss', patience=5)
except Exception as e:
import traceback
self.log.info("The given alg has some issue; using default hyperparam tuning algorithm: RandomSearch.\\n"+str(e))
tuner=RandomSearch(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=1,executions_per_trial=3)
from keras.callbacks import EarlyStopping
stop_early = EarlyStopping(monitor='val_loss', patience=5)
self.log.info("Falling back to default RandomSearch tuner.")
#hpt search for best params
try:
self.log.info("First try: Tuner search started")
tuner.search(X_train, y_train,validation_data=(X_test, y_test), callbacks=[stop_early])
self.log.info("First try: Tuner search ends")
except Exception as e:
self.log.info("Second try: Tuner search starts.\\n"+str(e))
tuner.search(x=X_train,y=y_train,validation_split=0.2, callbacks=[stop_early])
self.log.info("Second try: Tuner search ends")
# best_model = tuner.get_best_models(num_models=1)[0]
#self.log.info("best_model.summary(): \\n"+str(best_model.summary()))
best_hps=tuner.get_best_hyperparameters(num_trials=1)[0]
self.log.info("TS Multivariate LSTM best hyperparameter values:\\n"+str(best_hps.values))
self.log.info("Activation fn:\\n"+str(self.activation_fn))
n_input=self.look_back
best_hmodel=tuner.hypermodel.build(best_hps)
optimizer=self.optimizer
learning_rate=float(best_hps.get('learning_rate'))
try:
##TFSTask:7033, Added below try block for time series hyperparam tuning, here, for any optimizer, best learning_rate is provided from best_hps.
try:
if optimizer.lower() == "adam":
optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate)
elif(optimizer.lower() == "adadelta"):
optimizer=tf.keras.optimizers.experimental.Adadelta(learning_rate=learning_rate)
elif(optimizer.lower() == "nadam"):
optimizer=tf.keras.optimizers.experimental.Nadam(learning_rate=learning_rate)
elif(optimizer.lower() == "adagrad"):
optimizer=tf.keras.optimizers.experimental.Adagrad(learning_rate=learning_rate)
elif(optimizer.lower() == "adamax"):
optimizer=tf.keras.optimizers.experimental.Adamax(learning_rate=learning_rate)
elif(optimizer.lower() == "rmsprop"):
optimizer=tf.keras.optimizers.experimental.RMSprop(learning_rate=learning_rate)
elif(optimizer.lower() == "sgd"):
optimizer=tf.keras.optimizers.experimental.SGD(learning_rate=learning_rate)
else:
optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate)
except:
optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate)
##From best hyperparameter values, now creating multivariate time series model using time generator.
generatorTrain=TimeseriesGenerator(X_train, y_train, length=n_past, batch_size=self.batch_size)
# generatorTest=TimeseriesGenerator(test,test,length=n_input,batch_size=self.batch_size)
batch_0=generatorTrain[0]
x,y=batch_0
epochs=int(self.epochs)
##Multivariate LSTM model
try:
encoder_l = {}
encoder_outputs = {}
encoder_states = {}
decoder_l = {}
decoder_outputs = {}
enc_lstm_dropout = {}
enc_input_unit = best_hps.get('enc_input_unit')
enc_input_activation = best_hps.get('enc_input_activation')
dec_input_activation = best_hps.get('dec_input_activation')
dec_output_activation_1 = best_hps.get('dec_output_activation_1')
enc_lstm_units = {}
enc_lstm_activation = {}
dec_lstm_activation = {}
for indx in range(self.hidden_layers):
lindx = indx + 1
enc_lstm_units[lindx] = best_hps.get('enc_lstm_units_'+str(lindx))
enc_lstm_activation[lindx] = best_hps.get('enc_lstm_activation_'+str(lindx))
dec_lstm_activation[lindx] = best_hps.get('dec_lstm_activation_'+str(lindx))
enc_lstm_dropout[lindx] = best_hps.get('enc_lstm_dropout_'+str(lindx))
encoder_inputs = Input(shape=(n_past, n_features))
if(self.hidden_layers > 0):
encoder_l[0] = LSTM(enc_input_unit, activation = enc_input_activation, return_sequences = True, return_state=True)
else:
encoder_l[0] = LSTM(enc_input_unit, activation = enc_input_activation, return_state=True)
encoder_outputs[0] = encoder_l[0](encoder_inputs)
encoder_states[0] = encoder_outputs[0][1:]
if(self.hidden_layers > 0):
for indx in range(self.hidden_layers):
lindx = indx + 1
if lindx == self.hidden_layers:
encoder_l[lindx] = LSTM(enc_lstm_units[lindx], dropout = enc_lstm_dropout[lindx], activation = enc_lstm_activation[lindx], return_state=True)
else:
encoder_l[lindx] = LSTM(enc_lstm_units[lindx], dropout = enc_lstm_dropout[lindx], activation = enc_lstm_activation[lindx], return_sequences = True, return_state=True)
encoder_outputs[lindx] = encoder_l[lindx](encoder_outputs[indx][0])
encoder_states[lindx] = encoder_outputs[lindx][1:]
decoder_inputs = RepeatVector(n_future)(encoder_outputs[self.hidden_layers][0])
else:
decoder_inputs = RepeatVector(n_future)(encoder_outputs[0][0])
#
## The decoder input layer is identical regardless of hidden_layers
decoder_l[0] = LSTM(encoder_states[0][0].get_shape()[1], activation = dec_input_activation, return_sequences=True)(decoder_inputs,initial_state = encoder_states[0])
if(self.hidden_layers > 0):
for indx in range(self.hidden_layers):
lindx = indx + 1
decoder_l[lindx] = LSTM(encoder_states[lindx][0].get_shape()[1], activation = dec_lstm_activation[lindx], return_sequences=True)(decoder_l[indx],initial_state = encoder_states[lindx])
decoder_outputs[0] = TimeDistributed(tf.keras.layers.Dense(decoder_l[self.hidden_layers][0].get_shape()[1], activation = dec_output_activation_1))(decoder_l[self.hidden_layers])
decoder_outputs[1] = TimeDistributed(tf.keras.layers.Dense(1))(decoder_outputs[0])
else:
decoder_outputs[0] = TimeDistributed(tf.keras.layers.Dense(decoder_l[0][0].get_shape()[1], activation = dec_output_activation_1))(decoder_l[0])
decoder_outputs[1] = TimeDistributed(tf.keras.layers.Dense(1))(decoder_outputs[0])
#
model = tf.keras.models.Model(encoder_inputs,decoder_outputs[1])
self.log.info(model.summary())
self.log.info("loss="+self.loss_fn)
model.compile(optimizer=optimizer,loss=self.loss_fn,metrics=[self.loss_fn])
#model.fit_generator(generatorTrain, epochs=epochs,shuffle=False, verbose=0)
model.fit(X_train, y_train, batch_size=self.batch_size, epochs=epochs,shuffle=False, verbose=2)
except Exception as e:
import traceback
self.log.info("multivariate model build error: error msg:: \\n"+str(e))
return 'Error',0,0,0,0,None,pd.DataFrame(),0,None
#predictions = model.predict_generator(generatorTest)
except Exception as e:
import traceback
self.log.info("optimizer and timeseries generator build error: error msg:: \\n"+str(e))
self.log.info("optimizer and timeseries generator build error traceback: \\n"+traceback.format_exc())
return 'Error',0,0,0,0,None,pd.DataFrame(),0,None
try:
predictions=[]
X_test, y_test = self.create_dataset(test_data, n_past, n_future, targetColIndx)
predictions = model.predict(X_test)
self.log.info(predictions)
#convert the x test(includes target) to 2d as inverse transform accepts only 2d values
xtestlen = len(X_test)
xtest_2d = X_test.ravel().reshape(xtestlen * n_past, n_features)
#inverse transform of actual values
xtest_2d = scaler.inverse_transform(xtest_2d)
actual = xtest_2d[:, targetColIndx]
#inverse transform of predicted values
prediction_1d = predictions.ravel()
prediction_1d = prediction_1d.reshape(len(prediction_1d),1)
self.log.info(prediction_1d)
xtest_2d[:, targetColIndx] = prediction_1d
xtest_2d = scaler.inverse_transform(xtest_2d)
predictions = xtest_2d[:, targetColIndx]
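# Why the round-trip above: MinMaxScaler was fit on all n_features columns,
# so a single target column cannot be inverse-transformed on its own. The
# predictions are written into the target column(s) of the flattened 2-D test
# matrix, the whole matrix is inverse-transformed, and the target column(s)
# are then read back out in the original scale.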
mse=None
rmse=None
## Creating dataframe for actual,predictions
try:
pred_cols=list()
actual_cols=list()
for i in range(len(self.targetFeature)):
pred_cols.append(self.targetFeature[i]+'_pred')
actual_cols.append(self.targetFeature[i]+'_actual')
predictions = pd.DataFrame(predictions.ravel(), columns=pred_cols)
actual = pd.DataFrame(actual.ravel(), columns=actual_cols)
df_predicted=pd.concat([actual,predictions],axis=1)
self.log.info("LSTM Multivariate prediction dataframe: \\n"+str(df_predicted))
from math import sqrt
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error
target=self.targetFeature
mse_dict={}
rmse_dict={}
mae_dict={}
mape_dict={}
r2_dict={}
lstm_var = 0
self.log.info(actual.shape)
self.log.info(actual)
self.log.info(predictions.shape)
self.log.info(predictions)
mse = mean_squared_error(actual,predictions)
mse_dict[self.targetFeature[0]]=mse
rmse=sqrt(mse)
rmse_dict[self.targetFeature[0]]=rmse
lstm_var = lstm_var+rmse
self.log.info("Name of the target feature: "+str(self.targetFeature))
self.log.info("RMSE of the target feature: "+str(rmse))
r2 = r2_score(actual,predictions)
r2_dict[self.targetFeature[0]]=r2
mae = mean_absolute_error(actual,predictions)
mae_dict[self.targetFeature[0]]=mae
mape = mean_absolute_percentage_error(actual,predictions)
mape_dict[self.targetFeature[0]]=mape
## For VAR comparison, send last target mse and rmse from above dict
lstm_var = lstm_var/len(target)
select_msekey=list(mse_dict.keys())[-1]
l_mse=list(mse_dict.values())[-1]
select_rmsekey=list(rmse_dict.keys())[-1]
l_rmse=list(rmse_dict.values())[-1]
select_r2key=list(r2_dict.keys())[-1]
l_r2=list(r2_dict.values())[-1]
select_maekey=list(mae_dict.keys())[-1]
l_mae=list(mae_dict.values())[-1]
l_mape=list(mape_dict.values())[-1]
self.log.info("Selected target feature of LSTM for best model selection: "+str(select_rmsekey))
self.log.info("lstm rmse: "+str(l_rmse))
self.log.info("lstm mse: "+str(l_mse))
self.log.info("lstm r2: "+str(l_r2))
self.log.info("lstm mae: "+str(l_mae))
self.log.info("lstm mape: "+str(l_mape))
except Exception as e:
import traceback
self.log.info("prediction error traceback: \\n"+traceback.format_exc())
except Exception as e:
import traceback
self.log.info("prediction block error. err.msg: "+str(e))
self.log.info("prediction block error traceback: \\n"+traceback.format_exc())
return 'Error',0,0,0,0,None,pd.DataFrame(),0,None
return 'Success',round(l_mse,2),round(l_rmse,2),round(l_r2,2),round(l_mae,2),model,df_predicted,n_input,scaler
# import os
#predicted_file_name='lstm_prediction_df.csv'
#predicted_file_path=os.path.join(self.dataFolderLocation,predicted_file_name)
#df_predicted.to_csv(predicted_file_path)
##save model
#model_path = os.path.join(self.dataFolderLocation,self.model_name)
#self.log.info("mlp model saved at: "+str(model_path))
#model.save(model_path)
except Exception as e:
import traceback
self.log.info("lstm_encdec_mvin_uvout error. err.msg: "+str(e))
self.log.info("Final exception traceback: \\n"+traceback.format_exc())
return 'Error',0,0,0,0,None,pd.DataFrame(),0,None
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pandas as pd
import os
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
import logging
import tensorflow as tf
from tensorflow.keras.layers import Dropout
import math
import keras_tuner
#from keras_tuner.engine.hyperparameters import HyperParameters
from keras_tuner.tuners import RandomSearch,BayesianOptimization ,Hyperband
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
import warnings
warnings.simplefilter("ignore", UserWarning)
class timeseriesDLMultivariate:
def __init__(self,configfile,testpercentage,targetFeature,dateTimeFeature):
self.look_back=None
# self.df=df
self.epochs=None
self.batch_size=None
self.hidden_layers=None
self.optimizer=None
self.activation_fn="relu"
self.loss_fn=None
self.first_layer=None
self.dropout=None
self.model_name=None
self.dl_params = configfile
# self.data=data
self.targetFeature=targetFeature
self.dateTimeFeature=dateTimeFeature
self.testpercentage = float(testpercentage)
self.log = logging.getLogger('eion')
##Added for ts hpt (TFSTask:7033)
self.tuner_algorithm=""
self.num_features=0
##Get deep learning model hyperparameter from advanced config
def getdlparams(self):
val=self.dl_params
self.log.info('-------> The given mlp/lstm timeseries algorithm parameters:>>')
self.log.info(" "+str(val))
for k,v in val.items():
try:
if (k == "tuner_algorithm"):
self.tuner_algorithm=str(v)
elif (k == "activation"):
self.activation_fn=str(v)
elif (k == "optimizer"):
self.optimizer=str(v)
elif (k == "loss"):
self.loss_fn=str(v)
elif (k == "first_layer"):
if not isinstance(v,list):
self.first_layer=str(v).split(',')
else:
self.first_layer=v
elif (k == "lag_order"):
if isinstance(k,list):
k = ''.join(v)
k=int(float(str(v)))
else:
self.look_back=int(float(str(v)))
elif (k == "hidden_layers"):
self.hidden_layers=int(v)
elif (k == "dropout"):
if not isinstance(v,list):
self.dropout=str(v).split(',')
else:
self.dropout=v
elif (k == "batch_size"):
self.batch_size=int(v)
elif (k == "epochs"):
self.epochs=int(v)
elif (k == "model_name"):
self.model_name=str(v)
except Exception as e:
self.log.info('Exception occurred while reading deep learning params; falling back to default params.')
self.activation_fn="relu"
self.optimizer="adam"
self.loss_fn="mean_squared_error"
self.first_layer=[8,512]
self.hidden_layers=1
self.look_back=int(2)
self.dropout=[0.1,0.5]
self.batch_size=2
self.epochs=50
self.model_name="lstmmodel.h5"
continue
# Reshape the data to the required input shape of the LSTM model
def create_dataset(self,X, y, n_steps):
Xs, ys = [], []
for i in range(len(X) - n_steps):
v = X.iloc[i:(i + n_steps)].values
Xs.append(v)
ys.append(y.iloc[i + n_steps])
return np.array(Xs), np.array(ys)
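# Illustrative sketch (comment only, not executed): with a 100-row DataFrame
# and n_steps=5, this yields Xs.shape == (95, 5, n_cols) and ys.shape == (95, n_cols);
# each sample is a window of 5 consecutive rows plus the row that follows it.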
## Added function for hyperparam tuning (TFSTask:7033)
def build_model(self,hp):
n_features = len(self.targetFeature)
try:
loss=self.loss_fn
optimizer=self.optimizer
# self.getdlparams()
try:
if optimizer.lower() == "adam":
optimizer=tf.keras.optimizers.Adam
elif(optimizer.lower() == "adadelta"):
optimizer=tf.keras.optimizers.experimental.Adadelta
elif(optimizer.lower() == "nadam"):
optimizer=tf.keras.optimizers.experimental.Nadam
elif(optimizer.lower() == "adagrad"):
optimizer=tf.keras.optimizers.experimental.Adagrad
elif(optimizer.lower() == "adamax"):
optimizer=tf.keras.optimizers.experimental.Adamax
elif(optimizer.lower() == "rmsprop"):
optimizer=tf.keras.optimizers.experimental.RMSprop
elif(optimizer.lower() == "sgd"):
optimizer=tf.keras.optimizers.experimental.SGD
else:
optimizer=tf.keras.optimizers.Adam
except:
optimizer=tf.keras.optimizers.Adam
# look_back_min=int(self.look_back[0])
# look_back_max=int(self.look_back[1])
first_layer_min=round(int(self.first_layer[0]))
first_layer_max=round(int(self.first_layer[1]))
dropout_min=float(self.dropout[0])
dropout_max=float(self.dropout[1])
model=tf.keras.Sequential()
try:
model.add(LSTM(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16),input_shape=(self.look_back,self.num_features)))
except Exception as e:
import traceback
self.log.info("lstm build traceback: \\n"+str(traceback.print_exc()))
return model
model.add(Dropout(hp.Float('Dropout_rate',min_value=dropout_min,max_value=dropout_max,step=0.1)))
model.add(Dense(units=n_features))
model.compile(optimizer=optimizer(hp.Choice('learning_rate',values=[1e-1,1e-2,1e-3,1e-4])),loss=loss,metrics=[self.loss_fn])
except Exception as e:
self.log.info(",Hyperparam tuning build_model err msg: \\n"+ str(e))
return model
##Multivariate lstm prediction function (lstm model, train, prediction, metrics)
def lstm_multivariate(self,df):
try:
self.getdlparams()
n_features = len(self.targetFeature)
self.num_features=n_features
try:
if (type(self.targetFeature) is list):
pass
else:
self.targetFeature = list(self.targetFeature.split(","))
except:
pass
df_new = df[df.columns[df.columns.isin(self.targetFeature)]]
scaler=MinMaxScaler()
df_transformed=scaler.fit_transform(df_new)
## For hyperparam tuning below part is added.only for getting best model and best hyperparameters
train_data, test_data = train_test_split(df, test_size=0.2, shuffle=False)
self.hpt_train=train_data
time_steps=self.look_back ## Just for initialization before hyperparameter tuning.
tuner_alg=self.tuner_algorithm
#The below create_dataset only for getting best model and best hyperparameters
X_train, y_train = self.create_dataset(train_data, train_data, time_steps)
X_test, y_test = self.create_dataset(test_data, test_data, time_steps)
self.log.info("Hyperparameter tuning algorithm is given by user (AION->Advanced configuration -> timeSeriesForecasting->LSTM): \\n"+str(tuner_alg))
try:
## Remove untitled_project dir in AION root folder created by previous tuner search run
import shutil
shutil.rmtree(r".\\untitled_project")
except:
pass
try:
if (tuner_alg.lower()=="randomsearch"):
tuner=RandomSearch(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3)
elif (tuner_alg.lower()=="bayesianoptimization"):
tuner=BayesianOptimization(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3)
elif (tuner_alg.lower()=="hyperband"):
tuner=Hyperband(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_epochs=50,factor=3)
else:
self.log.info("The given alg is not implemented. Using default hyperparam tuning algorithm: RandomSearch.\\n")
tuner=RandomSearch(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3)
from keras.callbacks import EarlyStopping
stop_early = EarlyStopping(monitor='val_loss', patience=5)
except Exception as e:
self.log.info("The given alg has some issue; using default hyperparam tuning algorithm: RandomSearch.\\n")
tuner=RandomSearch(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3)
from keras.callbacks import EarlyStopping
stop_early = EarlyStopping(monitor='val_loss', patience=5)
self.log.info("tuner errmsg:\\n"+str(e))
#hpt search for best params
try:
tuner.search(X_train, y_train,validation_data=(X_test, y_test),callbacks=[stop_early])
except:
tuner.search(x=X_train,y=y_train,validation_split=0.2,callbacks=[stop_early])
# best_model = tuner.get_best_models(num_models=1)[0]
# self.log.info("best_model.summary(): \\n"+str(best_model.summary()))
best_hps=tuner.get_best_hyperparameters(num_trials=1)[0]
self.log.info("TS Multivariate LSTM best hyperparameter values:\\n"+str(best_hps.values))
self.log.info("Activation fn:\\n"+str(self.activation_fn))
# time_steps_best=best_hps.get('time_steps')
n_input=self.look_back
best_hmodel=tuner.hypermodel.build(best_hps)
optimizer=self.optimizer
self.first_layer=best_hps.get('units')
self.dropout=best_hps.get('Dropout_rate')
learning_rate=float(best_hps.get('learning_rate'))
try:
##TFSTask:7033, Added below try block for time series hyperparam tuning, here, for any optimizer, best learning_rate is provided from best_hps.
try:
if optimizer.lower() == "adam":
optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate)
elif(optimizer.lower() == "adadelta"):
optimizer=tf.keras.optimizers.experimental.Adadelta(learning_rate=learning_rate)
elif(optimizer.lower() == "nadam"):
optimizer=tf.keras.optimizers.experimental.Nadam(learning_rate=learning_rate)
elif(optimizer.lower() == "adagrad"):
optimizer=tf.keras.optimizers.experimental.Adagrad(learning_rate=learning_rate)
elif(optimizer.lower() == "adamax"):
optimizer=tf.keras.optimizers.experimental.Adamax(learning_rate=learning_rate)
elif(optimizer.lower() == "rmsprop"):
optimizer=tf.keras.optimizers.experimental.RMSprop(learning_rate=learning_rate)
elif(optimizer.lower() == "sgd"):
optimizer=tf.keras.optimizers.experimental.SGD(learning_rate=learning_rate)
else:
optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate)
except:
optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate)
##From best hyperparameter values, now creating multivariate time series model using time generator.
train,test = train_test_split(df_transformed,test_size=0.2,shuffle=False)
generatorTrain=TimeseriesGenerator(df_transformed,df_transformed,length=n_input,batch_size=self.batch_size)
# generatorTest=TimeseriesGenerator(test,test,length=n_input,batch_size=self.batch_size)
batch_0=generatorTrain[0]
x,y=batch_0
epochs=int(self.epochs)
##Multivariate LSTM model
try:
from tensorflow.keras.layers import Dropout
model=Sequential()
model.add(LSTM(self.first_layer,activation=self.activation_fn,input_shape=(n_input,n_features)))
model.add(Dropout(self.dropout))
model.add(Dense(n_features))
model.compile(optimizer=optimizer,loss=self.loss_fn)
#model.fit(generatorTrain,epochs=epochs,batch_size=self.batch_size,shuffle=False)
model.fit(generatorTrain, epochs=epochs, shuffle=False, verbose=0)
# lstm_mv_testScore_mse = model.evaluate(x, y, verbose=0)
except Exception as e:
self.log.info("multivariate model build error: error msg:: \\n"+str(e))
return 'Error',0,0,0,0,None,pd.DataFrame(),0,None
#predictions = model.predict_generator(generatorTest)
except Exception as e:
self.log.info("multivariate model build error: error msg:: \\n"+str(e))
return 'Error',0,0,0,0,None,pd.DataFrame(),0,None
try:
predictions=[]
future_pred_len=n_input
#To get values for prediction,taking look_back steps of rows
first_batch=test[-future_pred_len:]
c_batch = first_batch.reshape((1,future_pred_len,n_features))
current_pred=None
for i in range(len(test)):
#get pred for firstbatch
current_pred=model.predict(c_batch)[0]
predictions.append(current_pred)
#remove first val
c_batch_rmv_first=c_batch[:,1:,:]
#update
c_batch=np.append(c_batch_rmv_first,[[current_pred]],axis=1)
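#The loop above is a recursive (walk-forward) forecast: each predicted row is
#appended to the rolling window while the oldest row is dropped, so later
#predictions are conditioned on earlier predictions rather than on held-out data.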
prediction_actual=scaler.inverse_transform(predictions)
test_data_actual=scaler.inverse_transform(test)
mse=None
rmse=None
## Creating dataframe for actual,predictions
try:
pred_cols=list()
for i in range(len(self.targetFeature)):
pred_cols.append(self.targetFeature[i]+'_pred')
predictions = pd.DataFrame(prediction_actual, columns=pred_cols)
actual = pd.DataFrame(test_data_actual, columns=self.targetFeature)
actual.columns = [str(col) + '_actual' for col in self.targetFeature]
df_predicted=pd.concat([actual,predictions],axis=1)
self.log.info("LSTM Multivariate prediction dataframe: \\n"+str(df_predicted))
from math import sqrt
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error
target=self.targetFeature
mse_dict={}
rmse_dict={}
mae_dict={}
r2_dict={}
lstm_var = 0
for name in target:
index = df.columns.get_loc(name)
mse = mean_squared_error(test_data_actual[:,index],prediction_actual[:,index])
mse_dict[name]=mse
rmse=sqrt(mse)
rmse_dict[name]=rmse
lstm_var = lstm_var+rmse
self.log.info("Name of the target feature: "+str(name))
self.log.info("RMSE of the target feature: "+str(rmse))
r2 = r2_score(test_data_actual[:,index],prediction_actual[:,index])
r2_dict[name]=r2
mae = mean_absolute_error(test_data_actual[:,index],prediction_actual[:,index])
|
mae_dict[name]=mae
## For VAR comparison, send last target mse and rmse from above dict
lstm_var = lstm_var/len(target)
select_msekey=list(mse_dict.keys())[-1]
l_mse=list(mse_dict.values())[-1]
select_rmsekey=list(rmse_dict.keys())[-1]
l_rmse=list(rmse_dict.values())[-1]
select_r2key=list(r2_dict.keys())[-1]
l_r2=list(r2_dict.values())[-1]
select_maekey=list(mae_dict.keys())[-1]
l_mae=list(mae_dict.values())[-1]
self.log.info("Selected target feature of LSTM for best model selection: "+str(select_rmsekey))
self.log.info("lstm rmse: "+str(l_rmse))
self.log.info("lstm mse: "+str(l_mse))
self.log.info("lstm r2: "+str(l_r2))
self.log.info("lstm mae: "+str(l_mae))
except Exception as e:
import traceback
self.log.info("prediction error traceback: \\n"+traceback.format_exc())
except Exception as e:
self.log.info("prediction block error. err.msg: "+str(e))
return 'Error',0,0,0,0,None,pd.DataFrame(),0,None
return 'Success',round(l_mse,2),round(l_rmse,2),round(l_r2,2),round(l_mae,2),model,df_predicted,n_input,scaler
# import os
#predicted_file_name='lstm_prediction_df.csv'
#predicted_file_path=os.path.join(self.dataFolderLocation,predicted_file_name)
#df_predicted.to_csv(predicted_file_path)
##save model
#model_path = os.path.join(self.dataFolderLocation,self.model_name)
#self.log.info("mlp model saved at: "+str(model_path))
#model.save(model_path)
except Exception as e:
import traceback
self.log.info("lstm_multivariate error. err.msg: "+str(e))
self.log.info("Final exception traceback: \\n"+traceback.format_exc())
return 'Error',0,0,0,0,None,pd.DataFrame(),0,None
<s> import pandas as pd
import numpy as np
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.stattools import kpss
from statsmodels.tsa.seasonal import seasonal_decompose
import logging
import os
import warnings
warnings.filterwarnings('ignore')
## Main class to find out seassonality and stationary in timeseries data.
class tsStationarySeasonalityTest:
def __init__(self,df,deployLocation):
self.df=df
self.deployLocation=deployLocation
self.log = logging.getLogger('eion')
## to get the timeseries data stationary information
def stationary_model(self,df,target_features,stationary_check_method):
self.log.info("<------ Time Series stationary test started.....------------->\\n")
self.log.info("<------ Feature used:------------->\\t"+str(target_features))
stationary_status=None
if (stationary_check_method.lower()=='adfuller'):
stats_model=adfuller(df[target_features])
statistic, p_value, n_lags, num_observations, critical_values, info_criterion_best = stats_model[0], stats_model[1], stats_model[2], stats_model[3], stats_model[4], stats_model[5]
##Uncomment below logs when required.
self.log.info("Adfuller test (time series stationary test) p_value: \\t"+str(p_value))
# self.log.info("Adfuller test (time series stationary test) statistics: \\t"+str(statistic))
# self.log.info("Adfuller test (time series stationary test) number of lags (time steps): \\t"+str(n_lags))
# self.log.info("Adfuller test (time series stationary test) Critical values: \\n")
##To display critical values
# for key, value in stats_model[4].items():
# self.log.info(" \\t"+str(key)+"\\t"+str(value))
if (p_value>0.05):
stationary_status="feature is non-stationary"
self.log.info('Status:- |... '+str(target_features)+' is non stationary')
elif(p_value<0.05):
stationary_status="feature is stationary"
self.log.info('Status:- |... '+str(target_features)+' is stationary')
##KPSS differs from ADF in its null hypothesis: under KPSS the null hypothesis is that the series IS stationary, whereas under ADF the null is non-stationarity.
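## Quick reference for interpreting the p-values of the two tests:
## ADF : null hypothesis = unit root (non-stationary); p < 0.05 rejects it => stationary
## KPSS: null hypothesis = stationary; p < 0.05 rejects it => non-stationary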
elif (stationary_check_method.lower()=='kpss'):
from statsmodels.tsa.stattools import kpss
stats_model = kpss(df[target_features])
statistic, p_value, n_lags, critical_values=stats_model[0],stats_model[1],stats_model[2],stats_model[3]
self.log.info("kpss test (time series stationary test) p_value: \\t"+str(p_value))
self.log.info("kpss test (time series stationary test) statistics: \\t"+str(statistic))
self.log.info("kpss test (time series stationary test) number of lags (time steps): \\t"+str(n_lags))
self.log.info("kpss test (time series stationary test) Critical values: \\n")
for key, value in stats_model[3].items():
self.log.info(" \\t"+str(key)+"\\t"+str(value))
##In kpss, the stationary condition is opposite to adfuller.
if (p_value > 0.05):
stationary_status="feature is stationary"
self.log.info('Status:- |... '+str(target_features)+' is stationary')
else:
stationary_status="feature is non-stationary"
self.log.info('Status:- |... '+str(target_features)+' is non stationary')
return stats_model,n_lags,p_value,stationary_status
## Get stationary details
def stationary_check(self,target_features,time_col,method):
df=self.df
try:
df[time_col]=pd.to_datetime(df[time_col])
except Exception as e:
self.log.info("issue in datetime conversion...\\n"+str(e))
df=df.set_index(time_col)
try:
stationary_check_method=method
except:
stationary_check_method='adfuller'
if (len(target_features) == 1):
try:
if isinstance(target_features,list):
target_features=''.join(target_features)
elif isinstance(target_features,int):
target_features=str(target_features)
elif isinstance(target_features,str):
pass
except Exception as e:
self.log.info("stationary check target feature error: \\t"+str(e))
stationary_result={}
stats_model,n_lags,p_value,stationary_status=self.stationary_model(df,target_features,stationary_check_method)
stationary_result[target_features]=stationary_status
elif(len(target_features) > 1):
stationary_result={}
for col in df.columns:
# self.log.info("Multivariate feature for Stationary check:\\t"+str(col))
stats_model,n_lags,p_value,stationary_status=self.stationary_model(df,col,stationary_check_method)
stationary_result[col]=stationary_status
else:
self.log.info("TS Stationarity Test: Error in target feature, pls check.\\n.")
# self.log.info("Feature based stationarity_result:\\n"+str(stationary_result))
# ## Stationary component for whole dataset
stationary_combined_res=dict()
# stats_model,n_lags,p_value,stationary_status=self.stationary_all_features(time_col,'adfuller')
c_dict=[k for k,v in stationary_result.items() if 'non-stationary' in v]
if (len(c_dict)>=1):
stationary_combined_res['dataframe_stationarity']='Non-Stationary'
self.log.info('Status:- |... Data is non stationarity')
else:
stationary_combined_res['dataframe_stationarity']='Stationary'
# self.log.info("Stationarity information for whole dataset:\\n"+str(stationary_combined_res))
self.log.info("Time series Stationarity test completed.\\n")
return stats_model,n_lags,p_value,stationary_result,stationary_combined_res
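## Usage sketch (assumes df has a parseable datetime column 'DateTime' and a
## numeric target 'sales'; the names are illustrative only):
## ts = tsStationarySeasonalityTest(df, deploy_location)
## stats_model, n_lags, p_value, per_feature, combined = ts.stationary_check(['sales'], 'DateTime', 'adfuller')
## per_feature -> {'sales': 'feature is stationary'}; combined -> {'dataframe_stationarity': 'Stationary'}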
#Get seasonality by using seasonal_decompose lib.
def seasonality_model(self,target_features,df):
self.log.info("<------ Time Series Seasonality test started.....------------->\\n")
self.log.info("<------ Feature used:------------->\\n"+str(target_features))
seasonality_status=None
try:
try:
stats_model = kpss(df[target_features])
statistic, p_value, n_lags, critical_values=stats_model[0],stats_model[1],stats_model[2],stats_model[3]
except:
n_lags=1
pass
try:
df_target=self.df[target_features]
decompose_result_mult = seasonal_decompose(df_target,model='additive', extrapolate_trend='freq', period=max(n_lags,1))
except Exception as e:
self.log.info("Logging seasonality_model decompose_result_mult: \\t"+str(e))
##If additive model (type of seasonal component) failed, use multiplicative
decompose_result_mult = seasonal_decompose(df_target,model='multiplicative', extrapolate_trend='freq', period=1)
trend = decompose_result_mult.trend
observed=decompose_result_mult.observed
seasonal = decompose_result_mult.seasonal
residual = decompose_result_mult.resid
try:
if isinstance(df_target, pd.Series):
auto_correlation = df_target.autocorr(lag=n_lags)
# self.log.info("seasonality test: auto_correlation value:\\n"+str(auto_correlation))
elif isinstance(df_target, pd.DataFrame):
df_target = df_target.squeeze()
auto_correlation = df_target.autocorr(lag=n_lags)
# self.log.info("seasonality test: auto_correlation value:\\n"+str(auto_correlation))
except:
pass
self.log.info("<------------------ Time series Seasonality test result:------------------>")
if (seasonal.sum()==0):
seasonality_status="feature don't have seasonality (non seasonality)."
self.log.info('Status:- |... '+str(target_features)+' does not have seasonality')
self.log.info("<----- The model feature: "+str(target_features)+" does not have significant seasonality.----->\\n")
else:
seasonality_status="feature has seasonality."
self.log.info('Status:- |... '+str(target_features)+' has seasonality')
##Please use the below plot for GUI show (seasonality components)
# decompose_result_mult.plot()
df['observed'] = decompose_result_mult.observed
df['residual'] = decompose_result_mult.resid
df['seasonal'] = decompose_result_mult.seasonal
df['trend'] = decompose_result_mult.trend
df_name='timeseries_seasonality_check_'+f"{target_features}"+'.csv'
dir_n = os.path.join(self.deployLocation,'data','seasonality')
if not os.path.exists(dir_n):
os.makedirs(dir_n)
model_path=os.path.join(dir_n,df_name)
self.log.info("Seasonality information saved as dataframe at:\\t "+str(model_path))
## Seasonal component for whole dataset
df.to_csv(model_path)
except Exception as e:
self.log.info("Seasonality function exception: \\t"+str(e))
return df,decompose_result_mult,seasonality_status
##Main function to check seasonlity in data
def seasonal_check(self,target_features,time_col,seasonal_model):
df=self.df
# self.log.info("seasonal check started... \\n")
try:
df[time_col]=pd.to_datetime(df[time_col])
except Exception as e:
self.log.info("Issue in datetime conversion...\\n"+str(e))
df=df.set_index(time_col)
if (len(target_features)==1):
try:
if isinstance(target_features,list):
target_features=''.join(target_features)
elif isinstance(target_features,int):
target_features=str(target_features)
elif isinstance(target_features,str):
pass
except Exception as e:
self.log.info("stationary check target feature error: \\t"+str(e))
## Seasonal component for individual feature based.
seasonality_result=dict()
df,decompose_result_mult,seasonality_status = self.seasonality_model(target_features,df)
seasonality_result[target_features]=seasonality_status
elif(len(target_features) > 1):
seasonality_result=dict()
self.log.info("TS seasonality Test: The problem type is time series Multivariate.")
for col in df.columns:
df,decompose_result_mult,seasonality_status = self.seasonality_model(col,df)
seasonality_result[col]=seasonality_status
else:
self.log.info("TS seasonality Test: Error in target feature, pls check.\\n.")
# self.log.info("Feature based seasonality_result:\\n"+str(seasonality_result))
# ## Seasonal component for whole dataset
seasonality_combined_res=dict()
c_dict=[k for k,v in seasonality_result.items() if 'non seasonality' in v]
if (len(c_dict)>=1):
seasonality_combined_res['dataframe_seasonality']='No Seasonal elements'
else:
seasonality_combined_res['dataframe_seasonality']='contains seasonal elements.'
# self.log.info("Seasonality information for whole dataset:\\n"+str(season |
ality_combined_res))
self.log.info("Time series Seasonality test completed.\\n")
return df,decompose_result_mult,seasonality_result,seasonality_combined_res
#Main fn for standalone test purpose
if __name__=='__main__':
print("Inside seasonality-stationary test main function...")
print("Below code used for standalone test purpose.")
# df=pd.read_csv(r"C:\\AION_Works\\Data\\order_forecast_ts.csv")
# print("df info: \\n",df.info())
# df=df.drop('index',axis=1)
# time_col="DateTime"
# target='order1'
# stationary_method='adfuller'
# seasonal_model="additive" ## two models are available: 1.multiplicative, 2.additive
# if (isinstance(target,list)):
# pass
# elif (isinstance(target,str)):
# target=list(target.split(','))
# cls_ins=tsStationarySeasonalityTest(df,deploy_location)
# stats_model,n_lags,p_value=cls_ins.stationary_check(target,time_col,stationary_method)
# df,decompose_result_mult=cls_ins.seasonal_check(target,time_col,seasonal_model)
# print(" Time series stationary and seasonality check completed.")<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from .imports import importModule
supported_reader = ['sqlite', 'influx','s3']
functions_code = {
'dataReader':{'imports':[{'mod':'json'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'}],'code':"""
class dataReader():
def get_reader(self, reader_type, target_path=None, config=None):
if reader_type == 'sqlite':
return sqlite_writer(target_path=target_path)
elif reader_type == 'influx':
return Influx_writer(config=config)
elif reader_type == 'gcs':
return gcs(config=config)
elif reader_type == 'azure':
return azure(config=config)
elif reader_type == 's3':
return s3bucket(config=config)
else:
raise ValueError(reader_type)
"""
},
'sqlite':{'imports':[{'mod':'sqlite3'},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None}],'code':"""\\n\\
class sqlite_writer():
def __init__(self, target_path):
self.target_path = Path(target_path)
database_file = self.target_path.stem + '.db'
self.db = sqlite_db(self.target_path, database_file)
def file_exists(self, file):
if file:
return self.db.table_exists(file)
else:
return False
def read(self, file):
return self.db.read(file)
def write(self, data, file):
self.db.write(data, file)
def close(self):
self.db.close()
class sqlite_db():
def __init__(self, location, database_file=None):
if not isinstance(location, Path):
location = Path(location)
if database_file:
self.database_name = database_file
else:
self.database_name = location.stem + '.db'
db_file = str(location/self.database_name)
self.conn = sqlite3.connect(db_file)
self.cursor = self.conn.cursor()
self.tables = []
def table_exists(self, name):
if name in self.tables:
return True
elif name:
query = f"SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';"
listOfTables = self.cursor.execute(query).fetchall()
if len(listOfTables) > 0 :
self.tables.append(name)
return True
return False
def read(self, table_name):
return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn)
def create_table(self,name, columns, dtypes):
query = f'CREATE TABLE IF NOT EXISTS {name} ('
for column, data_type in zip(columns, dtypes):
query += f"'{column}' TEXT,"
query = query[:-1]
query += ');'
self.conn.execute(query)
return True
def write(self,data, table_name):
if not self.table_exists(table_name):
self.create_table(table_name, data.columns, data.dtypes)
tuple_data = list(data.itertuples(index=False, name=None))
insert_query = f'INSERT INTO {table_name} VALUES('
for i in range(len(data.columns)):
insert_query += '?,'
insert_query = insert_query[:-1] + ')'
self.cursor.executemany(insert_query, tuple_data)
self.conn.commit()
return True
def delete(self, name):
pass
def close(self):
self.conn.close()
"""
},
'influx':{'imports':[{'mod':'InfluxDBClient','mod_from':'influxdb'},{'mod':'datetime','mod_from':'datetime'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'}],'code':"""\\n\\
class Influx_writer():
def __init__(self, config):
self.db_config = config
self.db = influx_db(config)
def file_exists(self, file):
if file:
return self.db.table_exists(file)
else:
return False
def read(self, file):
query = "SELECT * FROM {}".format(file)
if 'read_time' in self.db_config.keys() and self.db_config['read_time']:
query += f" WHERE time > now() - {self.db_config['read_time']}"
return self.db.read(query)
def write(self, data, file):
self.db.write(data, file)
def close(self):
pass
class influx_db():
def __init__(self, config):
self.host = config['host']
self.port = config['port']
self.user = config.get('user', None)
self.password = config.get('password', None)
self.token = config.get('token', None)
self.database = config['database']
self.measurement = config['measurement']
self.tags = config['tags']
self.client = self.get_client()
def table_exists(self, name):
measurements = self.client.get_list_measurements()
for measurement in measurements:
if measurement['name'] == name:
return True
return False
def read(self, query)->pd.DataFrame:
cursor = self.client.query(query)
points = cursor.get_points()
my_list=list(points)
df=pd.DataFrame(my_list)
return df
def get_client(self):
headers = None
if self.token:
headers={"Authorization": self.token}
client = InfluxDBClient(self.host,self.port,self.user, self.password,headers=headers)
databases = client.get_list_database()
databases = [x['name'] for x in databases]
if self.database not in databases:
client.create_database(self.database)
return InfluxDBClient(self.host,self.port,self.user,self.password,self.database,headers=headers)
def write(self,data, measurement=None):
if isinstance(data, pd.DataFrame):
sorted_col = data.columns.tolist()
sorted_col.sort()
data = data[sorted_col]
data = data.to_dict(orient='records')
if not measurement:
measurement = self.measurement
for row in data:
if 'time' in row.keys():
p = '%Y-%m-%dT%H:%M:%S.%fZ'
time_str = datetime.strptime(row['time'], p)
del row['time']
else:
time_str = None
if 'model_ver' in row.keys():
self.tags['model_ver']= row['model_ver']
del row['model_ver']
json_body = [{
'measurement': measurement,
'time': time_str,
'tags': self.tags,
'fields': row
}]
self.client.write_points(json_body)
def delete(self, name):
pass
def close(self):
self.client.close()
"""
},
's3':{'imports':[{'mod':'boto3'},{'mod': 'ClientError', 'mod_from': 'botocore.exceptions'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':"""\\n\\
class s3bucket():
def __init__(self, config={}):
if 's3' in config.keys():
config = config['s3']
aws_access_key_id = config.get('aws_access_key_id','')
aws_secret_access_key = config.get('aws_secret_access_key','')
bucket_name = config.get('bucket_name','')
if not aws_access_key_id:
raise ValueError('aws_access_key_id can not be empty')
if not aws_secret_access_key:
raise ValueError('aws_secret_access_key can not be empty')
self.client = boto3.client('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=str(aws_secret_access_key))
self.bucket_name = bucket_name
def read(self, file_name):
try:
response = self.client.get_object(Bucket=self.bucket_name, Key=file_name)
return pd.read_csv(response['Body'])
except ClientError as ex:
if ex.response['Error']['Code'] == 'NoSuchBucket':
raise ValueError(f"Bucket '{self.bucket_name}' not found in aws s3 storage")
elif ex.response['Error']['Code'] == 'NoSuchKey':
raise ValueError(f"File '{file_name}' not found in s3 bucket '{self.bucket_name}'")
else:
raise
"""
},
'azure':{'imports':[{'mod':'DataLakeServiceClient', 'mod_from':'azure.storage.filedatalake'},{'mod':'detect', 'mod_from':'detect_delimiter'},{'mod':'pandavro', 'mod_as':'pdx'},{'mod':'io'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':"""\\n\\
class azure():
def __init__(self,config={}):
if 'azure' in config.keys():
config = config['azure']
account_name = config.get('account_name','')
account_key = config.get('account_key','')
container_name = config.get('container_name','')
if not account_name:
raise ValueError('Account name can not be empty')
if not account_key:
raise ValueError('Account key can not be empty')
if not container_name:
raise ValueError('Container name can not be empty')
service_client = DataLakeServiceClient(account_url="{}://{}.dfs.core.windows.net".format("https", account_name), credential=account_key)
self.file_system_client = service_client.get_file_system_client(container_name)
def read(self, directory_name):
root_dir = str(directory_name)
file_paths = self.file_system_client.get_paths(path=root_dir)
main_df = pd.DataFrame()
for path in file_paths:
if not path.is_directory:
file_client = self.file_system_client.get_file_client(path.name)
file_ext = Path(path.name).suffix
# assumption: blobs are staged to a local temp file named after the blob
local_file = Path(path.name).name
if file_ext in [".csv", ".tsv"]:
with open(local_file, "wb") as my_file:
file_client.download_file().readinto(my_file)
with open(local_file, 'r') as file:
data = file.read()
row_delimiter = detect(text=data, default=None, whitelist=[',', ';', ':', '|', '\\t'])
processed_df = pd.read_csv(local_file, sep=row_delimiter)
elif file_ext == ".parquet":
stream = io.BytesIO()
file_client.download_file().readinto(stream)
processed_df = pd.read_parquet(stream, engine='pyarrow')
elif file_ext == ".avro":
with open(local_file, "wb") as my_file:
file_client.download_file().readinto(my_file)
processed_df = pdx.read_avro(local_file)
if main_df.empty:
main_df = pd.DataFrame(processed_df)
else:
main_df = pd.concat([main_df, processed_df], ignore_index=True)
return main_df
"""
},
'gcs':{'imports':[{'mod':'storage','mod_from':'google.cloud'},{'mod':'io'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':"""\\n\\
class gcs():
def __init__(self, config={}):
if 'gcs' in config.keys():
config = config['gcs']
account_key = config.get('account_key','')
bucket_name = config.get('bucket_name','')
if not account_key:
raise ValueError('Account key can not be empty')
if not bucket_name:
raise ValueError('bucket name can not be empty')
storage_client = storage.Client.from_service_account_json(account_key)
self.bucket = storage_client.get_bucket(bucket_name)
def read(self, file_name):
data = self.bucket.blob(file_name).download_as_text()
return pd.read_csv(io.StringIO(data), encoding = 'utf-8', sep = ',')
"""
}
}
class data_reader():
def __init__(self, reader_type=[]):
self.supported_readers = supported_reader
if isinstance(reader_type, str):
self.readers = [reader_type]
elif not reader_type:
self.readers = self.supported_readers
else:
self.readers = reader_type
unsupported_reader = [ x for x in self.readers if x not in self.supported_readers]
if unsupported_reader:
raise ValueError(f"reader type '{unsupported_reader}' is not supported\\nSupported readers are {self.supported_readers}")
self.codeText = ""
self.importer = importModule()
def get_reader_code(self, readers):
reader_code = {
'sqlite': 'return sqlite_writer(target_path=target_path)',
'influx': 'return Influx_writer(config=config)',
'gcs': 'return gcs(config=config)',
'azure': 'return azure(config=config)',
's3': 'return s3bucket(config=config)'
}
code = "\\n\\ndef dataReader(reader_type, target_path=None, config=None):\\n"
for i, reader in enumerate(readers):
if not i:
code += f" if reader_type == '{reader}':\\n"
else:
code += f" elif reader_type == '{reader}':\\n"
code += f" {reader_code[reader]}\\n"
if readers:
code += " else:\\n"
code += f""" raise ValueError("'{{reader_type}}' not added during code generation")\\n"""
else:
code += f""" raise ValueError("'{{reader_type}}' not added during code generation")\\n"""
return code
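# Sketch of the dispatcher this emits for readers=['sqlite', 's3']:
#   def dataReader(reader_type, target_path=None, config=None):
#       if reader_type == 'sqlite':
#           return sqlite_writer(target_path=target_path)
#       elif reader_type == 's3':
#           return s3bucket(config=config)
#       else:
#           raise ValueError(...)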
def get_code(self):
code = self.get_reader_code(self.readers)
functions = []
for reader in self.readers:
functions.append(reader)
for function in functions:
code += self.get_function_code(function)
self.codeText += self.importer.getCode()
self.codeText += code
return self.codeText
def get_function_code(self, name):
code = ""
if name in functions_code.keys():
code += functions_code[name]['code']
if self.importer:
if 'imports' in functions_code[name].keys():
for module in functions_code[name]['imports']:
mod_name = module['mod']
mod_from = module.get('mod_from', None)
mod_as = module.get('mod_as', None)
self.importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as)
return code
def get_importer(self):
return self.importer
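# Usage sketch: generate the reader source for selected backends.
#   gen = data_reader(reader_type=['sqlite'])
#   source = gen.get_code()  # import lines + dataReader() dispatcher + sqlite classes
# The returned string is meant to be written into the deployed prediction package.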
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
class output_drift():
def __init__(self, missing=False, word2num_features = None, cat_encoder=False, target_encoder=False, normalizer=False, text_profiler=False, feature_reducer=False, score_smaller_is_better=True, problem_type='classification', tab_size=4):
self.tab = ' ' * tab_size
self.codeText = ''
self.missing = missing
self.word2num_features = word2num_features
self.cat_encoder = cat_encoder
self.target_encoder = target_encoder
self.normalizer = normalizer
self.text_profiler = text_profiler
self.feature_reducer = feature_reducer
self.score_smaller_is_better = score_smaller_is_better
self.problem_type = problem_type
def addDatabaseClass(self, indent=0):
text = "\\
\\nclass database():\\
\\n def __init__(self, config):\\
\\n self.host = config['host']\\
\\n self.port = config['port']\\
\\n self.user = config['user']\\
\\n self.password = config['password']\\
\\n self.database = config['database']\\
\\n self.measurement = config['measurement']\\
\\n self.tags = config['tags']\\
\\n self.client = self.get_client()\\
\\n\\
\\n def read_data(self, query)->pd.DataFrame:\\
\\n cursor = self.client.query(query)\\
\\n points = cursor.get_points()\\
\\n my_list=list(points)\\
\\n df=pd.DataFrame(my_list)\\
\\n return df\\
\\n\\
\\n def get_client(self):\\
\\n client = InfluxDBClient(self.host,self.port,self.user,self.password)\\
\\n databases = client.get_list_database()\\
\\n databases = [x['name'] for x in databases]\\
\\n if self.database not in databases:\\
\\n client.create_database(self.database)\\
\\n return InfluxDBClient(self.host,self.port,self.user,self.password, self.database)\\
\\n\\
\\n def write_data(self,data):\\
\\n if isinstance(data, pd.DataFrame):\\
\\n sorted_col = data.columns.tolist()\\
\\n sorted_col.sort()\\
\\n data = data[sorted_col]\\
\\n data = data.to_dict(orient='records')\\
\\n for row in data:\\
\\n if 'time' in row.keys():\\
\\n p = '%Y-%m-%dT%H:%M:%S.%fZ'\\
\\n time_str = datetime.strptime(row['time'], p)\\
\\n del row['time']\\
\\n else:\\
\\n time_str = None\\
\\n if 'model_ver' in row.keys():\\
\\n self.tags['model_ver']= row['model_ver']\\
\\n del row['model_ver']\\
\\n json_body = [{\\
\\n 'measurement': self.measurement,\\
\\n 'time': time_str,\\
\\n 'tags': self.tags,\\
\\n 'fields': row\\
\\n }]\\
\\n self.client.write_points(json_body)\\
\\n\\
\\n def close(self):\\
\\n self.client.close()\\
\\n"
if indent:
text = text.replace('\\n', (self.tab * indent) + '\\n')
return text
def addPredictClass(self, indent=0):
text = "\\
\\nclass predict():\\
\\n\\
\\n def __init__(self, base_config):\\
\\n self.usecase = base_config['modelName'] + '_' + base_config['modelVersion']\\
\\n self.dataLocation = base_config['dataLocation']\\
\\n self.db_enabled = base_config.get('db_enabled', False)\\
\\n if self.db_enabled:\\
\\n self.db_config = base_config['db_config']\\
\\n home = Path.home()\\
\\n if platform.system() == 'Windows':\\
\\n from pathlib import WindowsPath\\
\\n output_data_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'Data'\\
\\n output_model_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'target'/self.usecase\\
\\n else:\\
\\n from pathlib import PosixPath\\
\\n output_data_dir = PosixPath(home)/'HCLT'/'AION'/'Data'\\
\\n output_model_dir = PosixPath(home)/'HCLT'/'AION'/'target'/self.usecase\\
\\n if not output_model_dir.exists():\\
\\n raise ValueError(f'Configuration file not found at {output_model_dir}')\\
\\n\\
\\n tracking_uri = 'file:///' + str(Path(output_model_dir)/'mlruns')\\
\\n registry_uri = 'sqlite:///' + str(Path(output_model_dir)/'mlruns.db')\\
\\n mlflow.set_tracking_uri(tracking_uri)\\
\\n mlflow.set_registry_uri(registry_uri)\\
\\n client = mlflow.tracking.MlflowClient(\\
\\n tracking_uri=tracking_uri,\\
\\n registry_uri=registry_uri,\\
\\n )\\
\\n self.model_version = client.get_latest_versions(self.usecase, stages=['production'] )[0].version\\
\\n model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)\\
\\n self.model = mlflow.pyfunc.load_model(model_version_uri)\\
\\n run = client.get_run(self.model.metadata.run_id)\\
\\n if run.info.artifact_uri.startswith('file:'): #remove file:///\\
\\n self.artifact_path = Path(run.info.artifact_uri[len('file:///') : ])\\
\\n else:\\
\\n self.artifact_path = Path(run.info.artifact_uri)\\
\\n with open(self.artifact_path/'deploy.json', 'r') as f:\\
\\n deployment_dict = json.load(f)\\
\\n with open(self.artifact_path/'features.txt', 'r') as f:\\
\\n self.train_features = f.readline().rstrip().split(',')\\
\\n\\
\\n self.dataLocation = base_config['dataLocation']\\
\\n self.selected_features = deployment_dict['load_data']['selected_features']\\
\\n self.target_feature = deployment_dict['load_data']['target_feature']\\
\\n self.output_model_dir = output_model_dir"
if self.missing:
text += "\\n self.missing_values = deployment_dict['transformation']['fillna']"
if self.word2num_features:
text += "\\n self.word2num_features = deployment_dict['transformation']['word2num_features']"
if self.cat_encoder == 'labelencoding':
text += "\\n self.cat_encoder = deployment_dict['transformation']['cat_encoder']"
elif (self.cat_encoder == 'targetencoding') or (self.cat_encoder == 'onehotencoding'):
text += "\\n self.cat_encoder = deployment_dict['transformation']['cat_encoder']['file']"
text += "\\n self.cat_encoder_cols = deployment_dict['transformation']['cat_encoder']['features']"
if self.target_encoder:
text += "\\n self.target_encoder = joblib.load(self.artifact_path/deployment_dict['transformation']['target_encoder'])"
if self.normalizer:
text += "\\n self.normalizer = joblib.load(self.artifact_path/deployment_dict['transformation']['normalizer']['file'])\\
\\n self.normalizer_col = deployment_dict['transformation']['normalizer']['features']"
if self.text_profiler:
text += "\\n self.text_profiler = joblib.load(self.artifact_path/deployment_dict['transformation']['Status']['text_profiler']['file'])\\
\\n self.text_profiler_col = deployment_dict['transformation']['Status']['text_profiler']['features']"
if self.feature_reducer:
text += "\\n self.feature_reducer = joblib.load(self.artifact_path/deployment_dict['featureengineering']['feature_reducer']['file'])\\
\\n self.feature_reducer_cols = deployment_dict['featureengineering']['feature_reducer']['features']"
text += """
    def read_data_from_db(self):
        if self.db_enabled:
            db = None
            try:
                db = database(self.db_config)
                query = "SELECT * FROM {} WHERE model_ver = '{}' AND {} != ''".format(db.measurement, self.model_version, self.target_feature)
                if 'read_time' in self.db_config.keys() and self.db_config['read_time']:
                    query += f" AND time > now() - {self.db_config['read_time']}"
                data = db.read_data(query)
            except Exception:
                raise ValueError('Unable to read from the database')
            finally:
                if db:
                    db.close()
            return data
        return None"""
text += "\\
\\n def predict(self, data):\\
\\n df = pd.DataFrame()\\
\\n if Path(data).exists():\\
\\n if Path(data).suffix == '.tsv':\\
\\n df=read_data(data,encoding='utf-8',sep='\\t')\\
\\n elif Path(data).suffix == '.csv':\\
\\n df=read_data(data,encoding='utf-8')\\
\\n else:\\
\\n if Path(data).suffix == '.json':\\
\\n jsonData = read_json(data)\\
\\n df = pd.json_normalize(jsonData)\\
\\n elif is_file_name_url(data):\\
\\n df = read_data(data,encoding='utf-8')\\
\\n else:\\
\\n jsonData = json.loads(data)\\
\\n df = pd.json_normalize(jsonData)\\
\\n if len(df) == 0:\\
\\n raise ValueError('No data record found')\\
\\n missing_features = [x for x in self.selected_features if x not in df.columns]\\
\\n if missing_features:\\
\\n raise ValueError(f'some feature/s is/are missing: {missing_features}')\\
\\n if self.target_feature not in df.columns:\\
\\n            raise ValueError(f'Ground truth values/target column({self.target_feature}) not found in current data')\\
\\n df_copy = df.copy()\\
\\n df = df[self.selected_features]"
if self.word2num_features:
text += "\\n for feat in self.word2num_features:"
text += "\\n df[ feat ] = df[feat].apply(lambda x: s2n(x))"
if self.missing:
text += "\\n df.fillna(self.missing_values, inplace=True)"
if self.cat_encoder == 'labelencoding':
text += "\\n df.replace(self.cat_encoder, inplace=True)"
elif self.cat_encoder == 'targetencoding':
text += "\\n cat_enc = joblib.load(self.artifact_path/self.cat_encoder)"
text += "\\n df = cat_enc.transform(df)"
elif self.cat_encoder == 'onehotencoding':
text += "\\n cat_enc = joblib.load(self.artifact_path/self.cat_encoder)"
text += "\\n transformed_data = cat_enc.transform(df[self.cat_encoder_cols]).toarray()"
text += "\\n df[cat_enc.get_feature_names()] = pd.DataFrame(transformed_data, columns=cat_enc.get_feature_names())[cat_enc.get_feature_names()]"
if self.normalizer:
text += "\\n df[self.normalizer_col] = self.normalizer.transform(df[self.normalizer_col])"
if self.text_profiler:
text += "\\n text_corpus = df[self.text_profiler_col].apply(lambda row: ' '.join(row.values.astype(str)), axis=1)\\
\\n df_vect=self.text_profiler.transform(text_corpus)\\
\\n if isinstance(df_vect, np.ndarray):\\
\\n df1 = pd.DataFrame(df_vect)\\
\\n else:\\
\\n df1 = pd.DataFrame(df_vect.toarray(),columns = self.text_profiler.named_steps['vectorizer'].get_feature_names())\\
\\n df1 = df1.add_suffix('_vect')\\
\\n df = pd.concat([df, df1],axis=1)"
if self.feature_reducer:
text += "\\n df = self.feature_reducer.transform(df[self.feature_reducer_cols])"
else:
text += "\\n df = df[self.train_features]"
if self.target_encoder:
text += "\\n output = pd.DataFrame(self.model._model_impl.predict_proba(df), columns=self.target_encoder.classes_)\\
\\n df_copy['prediction'] = output.idxmax(axis=1)"
else:
text += "\\n output = self.model.predict(df).reshape(1, -1)[0].round(2)\\
\\n df_copy['prediction'] = output"
text += "\\n return df_copy"
if indent:
text = text.replace('\\n', (self.tab * indent) + '\\n')
return text
def getClassificationMatrixCode(self, indent=0):
text = "\\
\\ndef get_classification_metrics(actual_values, predicted_values):\\
\\n result = {}\\
\\n accuracy_score = sklearn.metrics.accuracy_score(actual_values, predicted_values)\\
\\n avg_precision = sklearn.metrics.precision_score(actual_values, predicted_values,\\
\\n average='macro')\\
\\n avg_recall = sklearn.metrics.recall_score(actual_values, predicted_values,\\
\\n average='macro')\\
\\n avg_f1 = sklearn.metrics.f1_score(actual_values, predicted_values,\\
\\n average='macro')\\
\\n\\
\\n result['accuracy'] = accuracy_score\\
\\n result['precision'] = avg_precision\\
\\n result['recall'] = avg_recall\\
\\n result['f1'] = avg_f1\\
\\n return result\\
\\n\\
"
if indent:
text = text.replace('\\n', (self.tab * indent) + '\\n')
return text
    def getRegressionMatrixCode(self, indent=0):
text = "\\
\\ndef get_regression_metrics(actual_values, predicted_values):\\
\\n result = {}\\
\\n\\
\\n me = np.mean(predicted_values - actual_values)\\
\\n sde = np.std(predicted_values - actual_values, ddof = 1)\\
\\n\\
\\n abs_err = np.abs(predicted_values - actual_values)\\
\\n mae = np.mean(abs_err)\\
\\n sdae = np.std(abs_err, ddof = 1)\\
\\n\\
\\n abs_perc_err = 100.*np.abs(predicted_values - actual_values) / actual_values\\
\\n mape = np.mean(abs_perc_err)\\
\\n sdape = np.std(abs_perc_err, ddof = 1)\\
\\n\\
\\n result['mean_error'] = me\\
\\n result['mean_abs_error'] = mae\\
\\n result['mean_abs_perc_error'] = mape\\
\\n result['error_std'] = sde\\
\\n result['abs_error_std'] = sdae\\
\\n result['abs_perc_error_std'] = sdape\\
\\n return result\\
\\n\\
"
if indent:
text = text.replace('\\n', (self.tab * indent) + '\\n')
return text
def addSuffixCode(self, indent=1):
text ="\\n\\
\\ndef check_drift( config):\\
\\n prediction = predict(config)\\
\\n usecase = config['modelName'] + '_' + config['modelVersion']\\
\\n train_data_path = prediction.artifact_path/(usecase+'_data.csv')\\
\\n if not train_data_path.exists():\\
\\n raise ValueError(f'Training data not found at {train_data_path}')\\
\\n curr_with_pred = prediction.read_data_from_db()\\
\\n if prediction.target_feature not in curr_with_pred.columns:\\
\\n raise ValueError('Ground truth not updated for corresponding data in database')\\
\\n train_with_pred = prediction.predict(train_data_path)\\
\\n performance = {}"
if self.problem_type == 'classification':
text += "\\n\\
\\n    performance['train'] = get_classification_metrics(train_with_pred[prediction.target_feature], train_with_pred['prediction'])\\
\\n    performance['current'] = get_classification_metrics(curr_with_pred[prediction.target_feature], curr_with_pred['prediction'])"
else:
text += "\\n\\
\\n    performance['train'] = get_regression_metrics(train_with_pred[prediction.target_feature], train_with_pred['prediction'])\\
\\n    performance['current'] = get_regression_metrics(curr_with_pred[prediction.target_feature], curr_with_pred['prediction'])"
text += "\\n return performance"
text += "\\n\\
\\nif __name__ == '__main__':\\
\\n try:\\
\\n if len(sys.argv) < 2:\\
\\n raise ValueError('config file not present')\\
\\n config = sys.argv[1]\\
\\n if Path(config).is_file() and Path(config).suffix == '.json':\\
\\n with open(config, 'r') as f:\\
\\n config = json.load(f)\\
\\n else:\\
\\n config = json.loads(config)\\
\\n output = check_drift(config)\\
\\n        status = {'Status':'Success','Message':output}\\
\\n print('output_drift:'+json.dumps(status))\\
\\n except Exception as e:\\
\\n status = {'Status':'Failure','Message':str(e)}\\
\\n print('output_drift:'+json.dumps(status))"
if indent:
text = text.replace('\\n', (self.tab * indent) + '\\n')
return text
def addStatement(self, statement, indent=1):
self.codeText += '\\n' + self.tab * indent + statement
def generateCode(self):
self.codeText += self.addDatabaseClass()
self.codeText += self.addPredictClass()
if self.problem_type == 'classification':
self.codeText += self.getClassificationMatrixCode()
elif self.problem_type == 'regression':
            self.codeText += self.getRegressionMatrixCode()
else:
raise ValueError(f"Unsupported problem type: {self.problem_type}")
self.codeText += self.addSuffixCode()
def getCode(self):
return self.codeText
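# Illustrative usage (sketch only; the output path is an assumption):
#
#   drift_gen = output_drift(problem_type='classification', target_encoder=True)
#   drift_gen.generateCode()   # database + predict classes, metrics, __main__ block
#   Path('output_drift.py').write_text(drift_gen.getCode())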
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class transformer():
def __init__(self, indent=0, tab_size=4):
self.df_name = 'df'
self.tab = ' ' * tab_size
self.codeText = ""
self.transformers = []
self.TxCols = []
self.imputers = {}
self.input_files = {}
self.output_files = {}
self.function_code = ''
self.addInputFiles({'inputData' : 'rawData.dat', 'metaData' : 'modelMetaData.json','log' : 'aion.log','trainData' : 'transformedData.dat','testData' : 'test.dat','preprocessor' : 'preprocessor.pkl'})
def addInputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.input_files[k] = v
def addOutputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
            self.output_files[k] = v
def getInputFiles(self):
text = 'IOFiles = '
if not self.input_files:
text += '{ }'
else:
text += json.dumps(self.input_files, indent=4)
return text
def getOutputFiles(self):
text = 'output_file = '
if not self.output_files:
text += '{ }'
else:
text += json.dumps(self.output_files, indent=4)
return text
def getInputOutputFiles(self, indent=0):
text = '\\n'
text += self.getInputFiles()
if indent:
text = text.replace('\\n', self.tab * indent + '\\n')
return text
def __addValidateConfigCode(self):
text = "\\n\\
\\ndef validateConfig():\\
\\n config_file = Path(__file__).parent/'config.json'\\
\\n if not Path(config_file).exists():\\
\\n raise ValueError(f'Config file is missing: {config_file}')\\
\\n config = read_json(config_file)\\
\\n return config"
return text
def getPrefixModules(self):
modules = [
{'module':'Path', 'mod_from':'pathlib'}
,{'module':'pandas', 'mod_as':'pd'}
,{'module':'numpy', 'mod_as':'np'}
,{'module':'scipy'}
]
return modules
def addPrefixCode(self, indent=1):
self.codeText += """
def transformation(log):
config = validateConfig()
targetPath = Path('aion')/config['targetPath']
if not targetPath.exists():
        raise ValueError(f'targetPath does not exist: {targetPath}')
meta_data_file = targetPath/IOFiles['metaData']
if meta_data_file.exists():
meta_data = read_json(meta_data_file)
else:
raise ValueError(f'Configuration file not found: {meta_data_file}')
log_file = targetPath/IOFiles['log']
log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)
dataLoc = targetPath/IOFiles['inputData']
if not dataLoc.exists():
return {'Status':'Failure','Message':'Data location does not exists.'}
status = dict()
df = read_data(dataLoc)
log.log_dataframe(df)
target_feature = config['target_feature']
if config['test_ratio'] == 0.0:
train_data = df
test_data = pd.DataFrame()
else:
"""
def getSuffixModules(self):
modules = [{'module':'pandas','mod_as':'pd'}
,{'module':'json'}
,{'module':'joblib'}
]
return modules
def addSuffixCode(self,encoder=False, indent=1):
self.codeText += """
train_data, preprocess_pipe, label_encoder = profilerObj.transform()
if not preprocess_pipe:
raise ValueError('Pipeline not created')
joblib.dump(preprocess_pipe, targetPath/IOFiles['preprocessor'])
test_data.reset_index(inplace=True)
"""
if encoder:
self.codeText += """
joblib.dump(label_encoder, targetPath/IOFiles['targetEncoder'])
if not test_data.empty:
ytest = label_encoder.transform(test_data[target_feature])
"""
else:
self.codeText += """
if not test_data.empty:
ytest = test_data[target_feature]
"""
self.codeText += """
    test_data = test_data.astype(profilerObj.train_features_type)
test_data = preprocess_pipe.transform(test_data)
if isinstance(test_data, scipy.sparse.spmatrix):
test_data = test_data.toarray()
preprocess_out_columns = train_data.columns.tolist()
preprocess_out_columns.remove(target_feature)
write_data(train_data,targetPath/IOFiles['trainData'],index=False)
if isinstance( test_data, np.ndarray):
test_data = pd.DataFrame(test_data, columns=preprocess_out_columns)
test_data[target_feature] = ytest
        write_data(test_data,targetPath/IOFiles['testData'],index=False)
    log.log_dataframe(train_data)
status = {'Status':'Success','trainData':IOFiles['trainData'],'testData':IOFiles['testData']}
meta_data['transformation'] = {}
meta_data['transformation']['cat_features'] = train_data.select_dtypes('category').columns.tolist()
meta_data['transformation']['preprocessor'] = IOFiles['preprocessor']
meta_data['transformation']['preprocess_out_columns'] = preprocess_out_columns
"""
if encoder:
self.codeText += """
meta_data['transformation']['target_encoder'] = IOFiles['targetEncoder']
"""
self.codeText += """
meta_data['transformation']['Status'] = status
write_json(meta_data, str(targetPath/IOFiles['metaData']))
log.info(f"Transformed data saved at {targetPath/IOFiles['trainData']}")
log.info(f'output: {status}')
return json.dumps(status)
"""
def getMainCodeModules(self):
modules = [{'module':'Path', 'mod_from':'pathlib'}
,{'module':'sys'}
,{'module':'json'}
,{'module':'logging'}
,{'module':'argparse'}
]
return modules
def addMainCode(self, indent=1):
self.codeText += "\\n\\
\\nif __name__ == '__main__':\\
\\n log = None\\
\\n try:\\
\\n print(transformation(log))\\
\\n except Exception as e:\\
\\n if log:\\
\\n log.error(e, exc_info=True)\\
\\n status = {'Status':'Failure','Message':str(e)}\\
\\n print(json.dumps(status))"
def addValidateConfigCode(self, indent=1):
self.function_code += self.__addValidateConfigCode()
def addLocalFunctionsCode(self):
self.addValidateConfigCode()
def addStatement(self, statement, indent=1):
self.codeText += '\\n' + self.tab * indent + statement
def getCode(self, indent=1):
return self.function_code + '\\n' + self.codeText
def getDFName(self):
return self.df_name
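# Illustrative assembly order (a sketch; the two addStatement() lines are
# hypothetical fillers for the `else:` branch that addPrefixCode() leaves open):
#
#   t = transformer()
#   t.addLocalFunctionsCode()       # emits validateConfig()
#   t.addPrefixCode()               # transformation() body up to the split
#   t.addStatement('train_data, test_data = train_test_split(df, test_size=config["test_ratio"])', indent=2)
#   t.addStatement('profilerObj = profiler(train_data, target=target_feature, config=config.get("profiler", {}))', indent=1)
#   t.addSuffixCode(encoder=True)   # persist pipeline, encode + write test data
#   t.addMainCode()
#   script = t.getInputOutputFiles() + t.getCode()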
class data_profiler():
    def __init__(self, importer, text_features=False, tab_size=4):
        self.importer = importer
        self.tab = ' ' * tab_size
        self.codeText = ""
        self.text_features = text_features
def addStatement(self, statement, indent=1):
self.codeText += '\\n' + self.tab * indent + statement
def get_module_import_statement(self, mod):
text = ""
if not mod.get('module', None):
return text
if mod.get('mod_from', None):
text += f"from {mod['mod_from']} "
text += f"import {mod['module']} "
if mod.get('mod_as', None):
text += f"as {mod['mod_as']}"
text += "\\n"
return text
def get_import_modules(self):
        profiler_imports = [
{'module': 'scipy', 'mod_from': None, 'mod_as': None},
{'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'w2n', 'mod_from': 'word2number', 'mod_as': None},
{'module': 'LabelEncoder', 'mod_from': 'sklearn.preprocessing', 'mod_as': None },
{'module': 'OrdinalEncoder', 'mod_from': 'sklearn.preprocessing', 'mod_as': None },
{'module': 'OneHotEncoder', 'mod_from': 'sklearn.preprocessing', 'mod_as': None },
{'module': 'SimpleImputer', 'mod_from': 'sklearn.impute', 'mod_as': None },
{'module': 'KNNImputer', 'mod_from': 'sklearn.impute', 'mod_as': None },
{'module': 'Pipeline', 'mod_from': 'sklearn.pipeline', 'mod_as': None },
{'module': 'FeatureUnion', 'mod_from': 'sklearn.pipeline', 'mod_as': None },
{'module': 'MinMaxScaler', 'mod_from': 'sklearn.preprocessing', 'mod_as': None },
{'module': 'StandardScaler', 'mod_from': 'sklearn.preprocessing', 'mod_as': None },
{'module': 'PowerTransformer', 'mod_from': 'sklearn.preprocessing', 'mod_as': None },
{'module': 'ColumnTransformer', 'mod_from': 'sklearn.compose', 'mod_as': None },
{'module': 'TransformerMixin', 'mod_from': 'sklearn.base', 'mod_as': None },
{'module': 'IsolationForest', 'mod_from': 'sklearn.ensemble', 'mod_as': None },
{'module': 'TargetEncoder', 'mod_from': 'category_encoders', 'mod_as': None }
]
        if self.text_features:
            profiler_imports.append({'module': 'textProfiler', 'mod_from': 'text.textProfiler', 'mod_as': None })
            profiler_imports.append({'module': 'textCombine', 'mod_from': 'text.textProfiler', 'mod_as': None })
        return profiler_imports
def get_importer(self):
return self.importer
def get_code(self):
        common_imports = self.get_import_modules()
        for module in common_imports:
mod_name = module['module']
mod_from = module.get('mod_from', None)
mod_as = module.get('mod_as', None)
if module['module'] in ['textProfiler','textCombine']:
self.importer.addLocalModule(mod_name, mod_from=mod_from, mod_as=mod_as)
else:
self.importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as)
self.codeText += """
STR_TO_CAT_CONVERSION_LEN_MAX = 10
log_suffix = f'[{Path(__file__).stem}] '
target_encoding_method_change = {'targetencoding': 'labelencoding'}
supported_method = {
'fillNa':
{
'categorical' : ['mode','zero','na'],
'numeric' : ['median','mean','knnimputer','zero','drop','na'],
},
'categoryEncoding': ['labelencoding','targetencoding','onehotencoding','na','none'],
'normalization': ['standardscaler','minmax','lognormal', 'na','none'],
'outlier_column_wise': ['iqr','zscore', 'disable'],
'outlierOperation': ['dropdata', 'average', 'nochange']
}
def findiqrOutlier(df):
Q1 = df.quantile(0.25)
Q3 = df.quantile(0.75)
IQR = Q3 - Q1
index = ~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR)))
return index
def findzscoreOutlier(df):
z = np.abs(scipy.stats.zscore(df))
index = (z < 3)
return index
def findiforestOutlier(df):
isolation_forest = IsolationForest(n_estimators=100)
isolation_forest.fit(df)
y_pred_train = isolation_forest.predict(df)
return y_pred_train == 1
def get_one_true_option(d, default_value=None):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
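# e.g. get_one_true_option({'mean': 'True', 'median': 'False'}) -> 'mean';
# returns default_value when no entry is set to true.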
def get_boolean(value):
if (isinstance(value, str) and value.lower() == 'true') or (isinstance(value, bool) and value == True):
return True
else:
return False
class profiler():
def __init__(self, xtrain, ytrain=None, target=None, encode_target = True, config={}, keep_unprocessed=[], log=None):
if not isinstance(xtrain, pd.DataFrame):
            raise ValueError(f'{log_suffix}supported data type is pandas.DataFrame but provided data is of {type(xtrain)} type')
if xtrain.empty:
raise ValueError(f'{log_suffix}Data frame is empty')
if target and target in xtrain.columns:
self.target = xtrain[target]
xtrain.drop(target, axis=1, inplace=True)
self.target_name = target
        elif ytrain is not None:
self.target = ytrain
self.target_name = 'target'
else:
self.target = pd.Series()
self.target_name = None
self.encode_target = encode_target
self.label_encoder = None
keep_unprocessed = [x for x in keep_unprocessed if x in xtrain.columns]
if keep_unprocessed:
self.unprocessed = xtrain[keep_unprocessed]
self.data = xtrain.drop(keep_unprocessed, axis=1)
else:
self.data = xtrain
self.unprocessed = pd.DataFrame()
self.colm_type = {}
for colm, infer_type in zip(self.data.columns, self.data.dtypes):
self.colm_type[colm] = infer_type
self.numeric_feature = []
self.cat_feature = []
self.text_feature = []
self.wordToNumericFeatures = []
self.added_features = []
self.pipeline = []
self.dropped_features = {}
self.train_features_type={}
self.__update_type()
self.config = config
self.featureDict = config.get('featureDict', [])
self.output_columns = []
self.feature_expender = []
self.text_to_num = {}
if log:
self.log = log
else:
self.log = logging.getLogger('eion')
self.type_conversion = {}
def log_dataframe(self, msg=None):
import io
buffer = io.StringIO()
self.data.info(buf=buffer)
if msg:
log_text = f'Data frame after {msg}:'
else:
log_text = 'Data frame:'
log_text += '\\\\n\\\\t'+str(self.data.head(2)).replace('\\\\n','\\\\n\\\\t')
log_text += ('\\\\n\\\\t' + buffer.getvalue().replace('\\\\n','\\\\n\\\\t'))
self.log.info(log_text)
def transform(self):
if self.is_target_available():
if self.target_name:
self.log.info(f"Target feature name: '{self.target_name}'")
self.log.info(f"Target feature size: {len(self.target)}")
else:
self.log.info(f"Target feature not present")
self.log_dataframe()
try:
self.process()
except Exception as e:
self.log.error(e, exc_info=True)
raise
pipe = FeatureUnion(self.pipeline)
self.log.info(pipe)
process_data = pipe.fit_transform(self.data, y=self.target)
self.update_output_features_names(pipe)
if isinstance(process_data, scipy.sparse.spmatrix):
process_data = process_data.toarray()
df = pd.DataFrame(process_data, columns=self.output_columns)
if self.is_target_available() and self.target_name:
df[self.target_name] = self.target
if not self.unprocessed.empty:
df[self.unprocessed.columns] = self.unprocessed
self.log_numerical_fill()
self.log_categorical_fill()
self.log_normalization()
return df, pipe, self.label_encoder
def log_type_conversion(self):
if self.log:
self.log.info('----------- Inspecting Features -----------')
self.log.info('----------- Type Conversion -----------')
count = 0
for k, v in self.type_conversion.items():
if v[0] != v[1]:
self.log.info(f'{k} -> from {v[0]} to {v[1]} : {v[2]}')
self.log.info('Status:- |... Feature inspection done')
def check_config(self):
removeDuplicate = self.config.get('removeDuplicate', False)
self.config['removeDuplicate'] = get_boolean(removeDuplicate)
self.config['misValueRatio'] = float(self.config.get('misValueRatio', '1.0'))
self.config['numericFeatureRatio'] = float(self.config.get('numericFeatureRatio', '1.0'))
self.config['categoryMaxLabel'] = int(self.config.get('categoryMaxLabel', '20'))
featureDict = self.config.get('featureDict', [])
if isinstance(featureDict, dict):
self.config['featureDict'] = []
if isinstance(featureDict, str):
self.config['featureDict'] = []
def process(self):
#remove duplicate not required at the time of prediction
self.check_config()
self.remove_constant_feature()
self.remove_empty_feature(self.config['misValueRatio'])
self.remove_index_features()
self.drop_na_target()
if self.config['removeDuplicate']:
self.drop_duplicate()
self.check_categorical_features()
self.string_to_numeric()
self.process_target()
self.train_features_type = dict(zip(self.data.columns, self.data.dtypes))
self.parse_process_step_config()
self.process_drop_fillna()
#self.log_type_conversion()
self.update_num_fill_dict()
#print(self.num_fill_method_dict)
self.update_cat_fill_dict()
self.create_pipeline()
self.text_pipeline(self.config)
self.apply_outlier()
self.log.info(self.process_method)
self.log.info(self.train_features_type)
def is_target_available(self):
return (isinstance(self.target, pd.Series) and not self.target.empty) or len(self.target)
def process_target(self, operation='encode', arg=None):
if self.encode_target:
if self.is_target_available():
self.label_encoder = LabelEncoder()
self.target = self.label_encoder.fit_transform(self.target)
return self.label_encoder
return None
    def is_target_column(self, column):
return column == self.target_name
def fill_default_steps(self):
num_fill_method = get_one_true_option(self.config.get('numericalFillMethod',None))
normalization_method = get_one_true_option(self.config.get('normalization',None))
for colm in self.numeric_feature:
if num_fill_method:
self.fill_missing_value_method(colm, num_fill_method.lower())
if normalization_method:
self.fill_normalizer_method(colm, normalization_method.lower())
cat_fill_method = get_one_true_option(self.config.get('categoricalFillMethod',None))
cat_encode_method = get_one_true_option(self.config.get('categoryEncoding',None))
for colm in self.cat_feature:
if cat_fill_method:
self.fill_missing_value_method(colm, cat_fill_method.lower())
if cat_encode_method:
self.fill_encoder_value_method(colm, cat_encode_method.lower(), default=True)
def parse_process_step_config(self):
self.process_method = {}
user_provided_data_type = {}
for feat_conf in self.featureDict:
colm = feat_conf.get('feature', '')
if not self.is_target_column(colm):
if colm in self.data.columns:
user_provided_data_type[colm] = feat_conf['type']
if user_provided_data_type:
self.update_user_provided_type(user_provided_data_type)
self.fill_default_steps()
for feat_conf in self.featureDict:
colm = feat_conf.get('feature', '')
if not self.is_target_column(colm):
if colm in self.data.columns:
if feat_conf.get('fillMethod', None):
self.fill_missing_value_method(colm, feat_conf['fillMethod'].lower())
if feat_conf.get('categoryEncoding', None):
self.fill_encoder_value_method(colm, feat_conf['categoryEncoding'].lower())
if feat_conf.get('normalization', None):
self.fill_normalizer_method(colm, feat_conf['normalization'].lower())
if feat_conf.get('outlier', None):
self.fill_outlier_method(colm, feat_conf['outlier'].lower())
if feat_conf.get('outlierOperation', None):
self.fill_outlier_process(colm, feat_conf['outlierOperation'].lower())
def update_output_features_names(self, pipe):
columns = self.output_columns
start_index = {}
for feat_expender in self.feature_expender:
if feat_expender:
step_name = list(feat_expender.keys())[0]
index = list(feat_expender.values())[0]
for transformer_step in pipe.transformer_list:
if transformer_step[1].steps[-1][0] in step_name:
start_index[index] = {transformer_step[1].steps[-1][0]: transformer_step[1].steps[-1][1].get_feature_names()}
if start_index:
index_shifter = 0
for key,value in start_index.items():
for k,v in value.items():
if k == 'vectorizer':
v = [f'{x}_vect' for x in v]
key = key + index_shifter
self.output_columns[key:key] = v
index_shifter += len(v)
self.added_features = [*self.added_features, *v]
def text_pipeline(self, conf_json):
if self.text_feature:
pipeList = []
max_features = 2000
text_pipe = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", self.text_feature)
], remainder="drop")),
("text_fillNa",SimpleImputer(strategy='constant', fill_value='')),
("merge_text_feature", textCombine())])
obj = textProfiler()
pipeList = obj.textProfiler(conf_json, pipeList, max_features)
last_step = "merge_text_feature"
for pipe_elem in pipeList:
text_pipe.steps.append((pipe_elem[0], pipe_elem[1]))
last_step = pipe_elem[0]
text_transformer = ('text_process', text_pipe)
self.pipeline.append(text_transformer)
self.feature_expender.append({last_step:len(self.output_columns)})
def create_pipeline(self):
num_pipe = {}
for k,v in self.num_fill_method_dict.items():
for k1,v1 in v.items():
if k1 and k1 != 'none':
num_pipe[f'{k}_{k1}'] = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", v1)
], remainder="drop")),
(k, self.get_num_imputer(k)),
(k1, self.get_num_scaler(k1))
])
else:
num_pipe[f'{k}_{k1}'] = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", v1)
], remainder="drop")),
(k, self.get_num_imputer(k))
])
self.output_columns.extend(v1)
cat_pipe = {}
for k,v in self.cat_fill_method_dict.items():
for k1,v1 in v.items():
cat_pipe[f'{k}_{k1}'] = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", v1)
], remainder="drop")),
(k, self.get_cat_imputer(k)),
(k1, self.get_cat_encoder(k1))
])
if k1 not in ['onehotencoding']:
self.output_columns.extend(v1)
else:
self.feature_expender.append({k1:len(self.output_columns)})
for key, pipe in num_pipe.items():
self.pipeline.append((key, pipe))
for key, pipe in cat_pipe.items():
self.pipeline.append((key, pipe))
if not self.unprocessed.empty:
self.pipeline.append(Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", self.unprocessed.columns)
], remainder="drop"))]))
"Drop: feature during training but replace with zero during prediction "
def process_drop_fillna(self):
drop_column = []
if 'numFill' in self.process_method.keys():
for col, method in self.process_method['numFill'].items():
if method == 'drop':
self.process_method['numFill'][col] = 'zero'
drop_column.append(col)
if 'catFill' in self.process_method.keys():
for col, method in self.process_method['catFill'].items():
if method == 'drop':
self.process_method['catFill'][col] = 'zero'
drop_column.append(col)
if drop_column:
self.data.dropna(subset=drop_column, inplace=True)
def update_num_fill_dict(self):
self.num_fill_method_dict = {}
if 'numFill' in self.process_method.keys():
for f in supported_method['fillNa']['numeric']:
self.num_fill_method_dict[f] = {}
for en in supported_method['normalization']:
self.num_fill_method_dict[f][en] = []
for col in self.numeric_feature:
numFillDict = self.process_method.get('numFill',{})
normalizationDict = self.process_method.get('normalization',{})
if f == numFillDict.get(col, '') and en == normalizationDict.get(col,''):
self.num_fill_method_dict[f][en].append(col)
if not self.num_fill_method_dict[f][en] :
del self.num_fill_method_dict[f][en]
if not self.num_fill_method_dict[f]:
del self.num_fill_method_dict[f]
def update_cat_fill_dict(self):
self.cat_fill_method_dict = {}
if 'catFill' in self.process_method.keys():
for f in supported_method['fillNa']['categorical']:
self.cat_fill_method_dict[f] = {}
for en in supported_method['categoryEncoding']:
self.cat_fill_method_dict[f][en] = []
for col in self.cat_feature:
catFillDict = self.process_method.get('catFill',{})
catEncoderDict = self.process_method.get('catEncoder',{})
if f == catFillDict.get(col, '') and en == catEncoderDict.get(col,''):
self.cat_fill_method_dict[f][en].append(col)
if not self.cat_fill_method_dict[f][en] :
del self.cat_fill_method_dict[f][en]
if not self.cat_fill_method_dict[f]:
del self.cat_fill_method_dict[f]
def __update_type(self):
self.numeric_feature = self.data.select_dtypes(include='number').columns.tolist()
self.cat_feature = self.data.select_dtypes(include='category').columns.tolist()
self.date_time = self.data.select_dtypes(include='datetime').columns.tolist()
self.text_feature = self.data.select_dtypes(include='object').columns.tolist()
def update_user_provided_type(self, data_types):
allowed_types = ['numerical','categorical', 'text','date','index']
type_mapping = {'numerical': np.dtype('float'), 'float': np.dtype('float'),'categorical': 'category', 'text':np.dtype('object'),'date':'datetime64[ns]','index': np.dtype('int64'),}
mapped_type = {k:type_mapping[v] for k,v in data_types.items()}
#self.log.info(mapped_type)
self.update_type(mapped_type, 'user provided data type')
def get_type(self, as_list=False):
if as_list:
return [self.colm_type.values()]
else:
return self.colm_type
def update_type(self, data_types={}, reason=''):
invalid_features = [x for x in data_types.keys() if x not in self.data.columns]
if invalid_features:
valid_feat = list(set(data_types.keys()) - set(invalid_features))
            valid_feat_type = {k:v for k,v in data_types.items() if k in valid_feat}
else:
valid_feat_type = data_types
        for k,v in valid_feat_type.items():
            if v != self.colm_type[k].name:
                try:
                    self.data[k] = self.data[k].astype(v)
                    self.type_conversion[k] = (self.colm_type[k], v, 'Done', reason)
                    self.colm_type[k] = self.data[k].dtype
                except (ValueError, TypeError):
                    self.type_conversion[k] = (self.colm_type[k], v, 'Fail', reason)
        self.__update_type()
def string_to_numeric(self):
def to_number(x):
try:
return w2n.word_to_num(x)
except:
return np.nan
for col in self.text_feature:
col_values = self.data[col].copy()
col_values = pd.to_numeric(col_values, errors='coerce')
if col_values.count() >= (self.config['numericFeatureRatio'] * len(col_values)):
self.text_to_num[col] = 'float64'
self.wordToNumericFeatures.append(col)
if self.text_to_num:
columns = list(self.text_to_num.keys())
            self.data[columns] = self.data[columns].applymap(to_number)
self.update_type(self.text_to_num)
self.log.info('----------- Inspecting Features -----------')
for col in self.text_feature:
self.log.info(f'-------> Feature : {col}')
if col in self.text_to_num:
self.log.info('----------> Numeric Status :Yes')
self.log.info('----------> Data Type Converting to numeric :Yes')
else:
self.log.info('----------> Numeric Status :No')
self.log.info(f'\\\\nStatus:- |... Feature inspection done for numeric data: {len(self.text_to_num)} feature(s) converted to numeric')
self.log.info(f'\\\\nStatus:- |... Feature word to numeric treatment done: {self.text_to_num}')
self.log.info('----------- Inspecting Features End -----------')
def check_categorical_features(self):
num_data = self.data.select_dtypes(include='number')
num_data_unique = num_data.nunique()
num_to_cat_col = {}
for i, value in enumerate(num_data_unique):
if value < self.config['categoryMaxLabel']:
num_to_cat_col[num_data_unique.index[i]] = 'category'
if num_to_cat_col:
self.update_type(num_to_cat_col, 'numerical to categorical')
str_to_cat_col = {}
str_data = self.data.select_dtypes(include='object')
str_data_unique = str_data.nunique()
for i, value in enumerate(str_data_unique):
if value < self.config['categoryMaxLabel']:
str_to_cat_col[str_data_unique.index[i]] = 'category'
for colm in str_data.columns:
if self.data[colm].str.len().max() < STR_TO_CAT_CONVERSION_LEN_MAX:
str_to_cat_col[colm] = 'category'
if str_to_cat_col:
self.update_type(str_to_cat_col, 'text to categorical')
def drop_features(self, features=[], reason='unspecified'):
if isinstance(features, str):
features = [features]
feat_to_remove = [x for x in features if x in self.data.columns]
        if feat_to_remove:
            self.data.drop(feat_to_remove, axis=1, inplace=True)
for feat in feat_to_remove:
self.dropped_features[feat] = reason
self.log_drop_feature(feat_to_remove, reason)
self.__update_type()
def drop_duplicate(self):
index = self.data.duplicated(keep='first')
if index.sum():
self.remove_rows(index, 'duplicate rows')
def drop_na_target(self):
if self.is_target_available():
self.remove_rows(self.target.isna(), 'null target values')
def log_drop_feature(self, columns, reason):
self.log.info(f'---------- Dropping {reason} features ----------')
self.log.info(f'\\\\nStatus:- |... {reason} feature treatment done: {len(columns)} {reason} feature(s) found')
self.log.info(f'-------> Drop Features: {columns}')
self.log.info(f'Data Frame Shape After Dropping (Rows,Columns): {self.data.shape}')
def log_normalization(self):
if self.process_method.get('normalization', None):
self.log.info(f'\\\\nStatus:- !... Normalization treatment done')
for method in supported_method['normalization']:
cols = []
for col, m in self.process_method['normalization'].items():
if m == method:
cols.append(col)
if cols and method != 'none':
self.log.info(f'Running {method} on features: {cols}')
def log_numerical_fill(self):
if self.process_method.get('numFill', None):
self.log.info(f'\\\\nStatus:- !... Fillna for numeric feature done')
for method in supported_method['fillNa']['numeric']:
cols = []
for col, m in self.process_method['numFill'].items():
if m == method:
cols.append(col)
if cols:
self.log.info(f'-------> Running {method} on features: {cols}')
def log_categorical_fill(self):
if self.process_method.get('catFill', None):
self.log.info(f'\\\\nStatus:-!... FillNa for categorical feature done')
for method in supported_method['fillNa']['categorical']:
cols = []
for col, m in self.process_method['catFill'].items():
if m == method:
cols.append(col)
if cols:
self.log.info(f'-------> Running {method} on features: {cols}')
def remove_constant_feature(self):
unique_values = self.data.nunique()
constant_features = []
for i, value in enumerate(unique_values):
if value == 1:
constant_features.append(unique_values.index[i])
if constant_features:
self.drop_features(constant_features, "constant")
for i in constant_features:
try:
self.numeric_feature.remove(i)
except ValueError:
pass
try:
self.cat_feature.remove(i)
except ValueError:
pass
def remove_empty_feature(self, misval_ratio=1.0):
missing_ratio = self.data.isnull().sum() / len(self.data)
missing_ratio = {k:v for k,v in zip(self.data.columns, missing_ratio)}
empty_features = [k for k,v in missing_ratio.items() if v > misval_ratio]
if empty_features:
self.drop_features(empty_features, "empty")
for i in empty_features:
try:
self.numeric_feature.remove(i)
except ValueError:
pass
try:
self.cat_feature.remove(i)
except:
pass
def remove_index_features(self):
index_feature = []
for feat in self.numeric_feature:
if self.data[feat].nunique() == len(self.data):
if (self.data[feat].sum()- sum(self.data.index) == (self.data.iloc[0][feat]-self.data.index[0])*len(self.data)):
index_feature.append(feat)
self.drop_features(index_feature, "index")
for i in index_feature:
try:
self.numeric_feature.remove(i)
except ValueError:
pass
try:
self.cat_feature.remove(i)
except:
pass
def fill_missing_value_method(self, colm, method):
if colm in self.numeric_feature:
if method in supported_method['fillNa']['numeric']:
if 'numFill' not in self.process_method.keys():
self.process_method['numFill'] = {}
if method == 'na' and self.process_method['numFill'].get(colm, None):
pass # don't overwrite
else:
self.process_method['numFill'][colm] = method
if colm in self.cat_feature:
if method in supported_method['fillNa']['categorical']:
if 'catFill' not in self.process_method.keys():
self.process_method['catFill'] = {}
if method == 'na' and self.process_method['catFill'].get(colm, None):
pass
else:
self.process_method['catFill'][colm] = method
def check_encoding_method(self, method, colm,default=False):
if not self.is_target_available() and (method.lower() == list(target_encoding_method_change.keys())[0]):
method = target_encoding_method_change[method.lower()]
if default:
self.log.info(f"Applying Label encoding instead of Target encoding on feature '{colm}' as target feature is not present")
return method
def fill_encoder_value_method(self,colm, method, default=False):
if colm in self.cat_feature:
if method.lower() in supported_method['categoryEncoding']:
if 'catEncoder' not in self.process_method.keys():
self.process_method['catEncoder'] = {}
if method == 'na' and self.process_method['catEncoder'].get(colm, None):
pass
else:
self.process_method['catEncoder'][colm] = self.check_encoding_method(method, colm,default)
else:
self.log.info(f"-------> categorical encoding method '{method}' is not supported. supported methods are {supported_method['categoryEncoding']}")
def fill_normalizer_method(self,colm, method):
if colm in self.numeric_feature:
if method in supported_method['normalization']:
if 'normalization' not in self.process_method.keys():
self.process_method['normalization'] = {}
if (method == 'na' or method == 'none') and self.process_method['normalization'].get(colm, None):
pass
else:
self.process_method['normalization'][colm] = method
else:
self.log.info(f"-------> Normalization method '{method}' is not supported. supported methods are {supported_method['normalization']}")
def apply_outlier(self):
inlier_indices = np.array([True] * len(self.data))
if self.process_method.get('outlier', None):
self.log.info('-------> Feature wise outlier detection:')
for k,v in self.process_method['outlier'].items():
if k in self.numeric_feature:
if v == 'iqr':
index = findiqrOutlier(self.data[k])
elif v == 'zscore':
index = findzscoreOutlier(self.data[k])
elif v == 'disable':
index = None
                    if index is not None and k in self.process_method.get('outlierOperation', {}):
if self.process_method['outlierOperation'][k] == 'dropdata':
inlier_indices = np.logical_and(inlier_indices, index)
elif self.process_method['outlierOperation'][k] == 'average':
mean = self.data[k].mean()
index = ~index
self.data.loc[index,[k]] = mean
self.log.info(f'-------> {k}: Replaced by Mean {mean}: total replacement {index.sum()}')
elif self.process_method['outlierOperation'][k] == 'nochange' and v != 'disable':
self.log.info(f'-------> Total outliers in "{k}": {(~index).sum()}')
if self.config.get('outlierDetection',None):
if self.config['outlierDetection'].get('IsolationForest','False') == 'True':
index = findiforestOutlier(self.data[self.numeric_feature])
inlier_indices = np.logical_and(inlier_indices, index)
self.log.info(f'-------> Numeric feature based Outlier detection(IsolationForest):')
if inlier_indices.sum() != len(self.data):
self.remove_rows( inlier_indices == False, 'outlier detection')
self.log.info('Status:- |... Outlier treatment done')
self.log.info(f'-------> Data Frame Shape After Outlier treatment (Rows,Columns): {self.data.shape}')
def remove_rows(self, indices, msg=''):
if indices.sum():
indices = ~indices
if len(indices) != len(self.data):
raise ValueError('Data Frame length mismatch')
self.data = self.data[indices]
self.data.reset_index(drop=True, inplace=True)
if self.is_target_available():
self.target = self.target[indices]
if isinstance(self.target, pd.Series):
self.target.reset_index(drop=True, inplace=True)
if not self.unprocessed.empty:
self.unprocessed = self.unprocessed[indices]
self.unprocessed.reset_index(drop=True, inplace=True)
self.log.info(f'-------> {msg} dropped rows count: {(indices == False).sum()}')
def fill_outlier_method(self,colm, method):
if colm in self.numeric_feature:
if method in supported_method['outlier_column_wise']:
if 'outlier' not in self.process_method.keys():
self.process_method['outlier'] = {}
                if method != 'disable':
self.process_method['outlier'][colm] = method
else:
self.log.info(f"-------> outlier detection method '{method}' is not supported for column wise. supported methods are {supported_method['outlier_column_wise']}")
def fill_outlier_process(self,colm, method):
if colm in self.numeric_feature:
if method in supported_method['outlierOperation']:
if 'outlierOperation' not in self.process_method.keys():
self.process_method['outlierOperation'] = {}
self.process_method['outlierOperation'][colm] = method
else:
self.log.info(f"-------> outlier process method '{method}' is not supported for column wise. supported methods are {supported_method['outlieroperation']}")
def get_cat_imputer(self,method):
if method == 'mode':
return SimpleImputer(strategy='most_frequent')
elif method == 'zero':
return SimpleImputer(strategy='constant', fill_value=0)
def get_cat_encoder(self,method):
if method == 'labelencoding':
return OrdinalEncoder(handle_unknown="error")
elif method == 'onehotencoding':
return OneHotEncoder(sparse=False,handle_unknown="error")
elif method == 'targetencoding':
if not self.is_target_available():
raise ValueError('Can not apply Target Encoding when target feature is not present')
return TargetEncoder(handle_unknown='error')
def get_num_imputer(self,method):
if method == 'mode':
return SimpleImputer(strategy='most_frequent')
elif method == 'mean':
return SimpleImputer(strategy='mean')
elif method == 'median':
return SimpleImputer(strategy='median')
elif method == 'knnimputer':
return KNNImputer()
elif method == 'zero':
return SimpleImputer(strategy='constant', fill_value=0)
def get_num_scaler(self,method):
if method == 'minmax':
return MinMaxScaler()
elif method == 'standardscaler':
return StandardScaler()
elif method == 'lognormal':
return PowerTransformer(method='yeo-johnson', standardize=False)
"""
return self.codeText
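# Illustrative usage (sketch only; importModule is the project's import
# collector, its API assumed from the calls above):
#
#   imp = importModule()
#   dp = data_profiler(imp, text_features=False)
#   body = dp.get_code()                       # also registers the needed imports
#   source = dp.get_importer().getCode() + body
#   Path('dataProfiler.py').write_text(source)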
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class register():
def __init__(self, importer, indent=0, tab_size=4):
self.tab = " "*tab_size
self.codeText = ""
self.function_code = ""
self.importer = importer
self.input_files = {}
self.output_files = {}
self.addInputFiles({'log' : 'aion.log', 'metaData' : 'modelMetaData.json','model' : 'model.pkl', 'performance': 'performance.json','production':'production.json','monitor':'monitoring.json'})
def addInputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.input_files[k] = v
def addOutputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
            self.output_files[k] = v
def getInputFiles(self):
text = 'IOFiles = '
if not self.input_files:
text += '{ }'
else:
text += json.dumps(self.input_files, indent=4)
return text
def getOutputFiles(self):
text = 'output_file = '
if not self.output_files:
text += '{ }'
else:
text += json.dumps(self.output_files, indent=4)
return text
def getInputOutputFiles(self, indent=0):
text = '\\n'
text += self.getInputFiles()
if indent:
text = text.replace('\\n', self.tab * indent + '\\n')
return text
def code_imports(self):
modules = [{'module':'sys'}
                ,{'module':'json'}
                ,{'module':'time'}
,{'module':'platform'}
,{'module':'tempfile'}
,{'module':'sqlite3'}
,{'module':'mlflow'}
,{'module':'Path', 'mod_from':'pathlib'}
,{'module':'ViewType', 'mod_from':'mlflow.entities'}
,{'module':'MlflowClient', 'mod_from':'mlflow.tracking'}
,{'module':'ModelVersionStatus', 'mod_from':'mlflow.entities.model_registry.model_version_status'}
]
self.import_modules(modules)
def import_module(self, module, mod_from=None, mod_as=None):
self.importer.addModule(module, mod_from=mod_from, mod_as=mod_as)
def import_modules(self, modules):
if isinstance(modules, list):
for mod in modules:
if isinstance(mod, dict):
self.importer.addModule(mod['module'], mod_from= mod.get('mod_from', None), mod_as=mod.get('mod_as', None))
def getImportCode(self):
return self.importer.getCode()
def __addValidateConfigCode(self, models=None):
text = "\\n\\
\\ndef validateConfig():\\
\\n config_file = Path(__file__).parent/'config.json'\\
\\n if not Path(config_file).exists():\\
\\n raise ValueError(f'Config file is missing: {config_file}')\\
\\n config = read_json(config_file)\\
\\n return config\\
"
return text
def addLocalFunctionsCode(self, models):
self.function_code += self.__addValidateConfigCode(models)
def addPrefixCode(self, indent=1):
self.code_imports()
self.codeText += "\\n\\
\\ndef __merge_logs(log_file_sequence,path, files):\\
\\n if log_file_sequence['first'] in files:\\
\\n with open(path/log_file_sequence['first'], 'r') as f:\\
\\n main_log = f.read()\\
\\n files.remove(log_file_sequence['first'])\\
\\n for file in files:\\
\\n with open(path/file, 'r') as f:\\
\\n main_log = main_log + f.read()\\
\\n (path/file).unlink()\\
\\n with open(path/log_file_sequence['merged'], 'w') as f:\\
\\n f.write(main_log)\\
\\n\\
\\ndef merge_log_files(folder, models):\\
\\n log_file_sequence = {\\
\\n 'first': 'aion.log',\\
\\n 'merged': 'aion.log'\\
\\n }\\
\\n log_file_suffix = '_aion.log'\\
\\n log_files = [x+log_file_suffix for x in models if (folder/(x+log_file_suffix)).exists()]\\
\\n log_files.append(log_file_sequence['first'])\\
\\n __merge_logs(log_file_sequence, folder, log_files)\\
\\n\\
\\ndef register_model(targetPath,models,usecasename, meta_data):\\
\\n register = mlflow_register(targetPath, usecasename, meta_data)\\
\\n register.setup_registration()\\
\\n\\
\\n runs_with_score = register.get_unprocessed_runs(models)\\
\\n best_run = register.get_best_run(runs_with_score)\\
\\n register.update_unprocessed(runs_with_score)\\
\\n return register.register_model(models, best_run)\\
\\n\\
\\ndef register(log):\\
\\n config = validateConfig()\\
\\n targetPath = Path('aion')/config['targetPath']\\
\\n models = config['models']\\
\\n merge_log_files(targetPath, models)\\
\\n meta_data_file = targetPath/IOFiles['metaData']\\
\\n if meta_data_file.exists():\\
\\n meta_data = read_json(meta_data_file)\\
\\n else:\\
\\n raise ValueError(f'Configuration file not found: {meta_data_file}')\\
\\n usecase = config['targetPath']\\
\\n # enable logging\\
\\n log_file = targetPath/IOFiles['log']\\
\\n log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)\\
\\n register_model_name = register_model(targetPath,models,usecase, meta_data)\\
\\n status = {'Status':'Success','Message':f'Model Registered: {register_model_name}'}\\
\\n log.info(f'output: {status}')\\
\\n return json.dumps(status)"
def getMainCodeModules(self):
modules = [{'module':'Path', 'mod_from':'pathlib'}
,{'module':'sys'}
,{'module':'os'}
,{'module':'json'}
,{'module':'logging'}
,{'module':'shutil'}
,{'module':'argparse'}
]
return modules
def addMainCode(self, models, indent=1):
self.codeText += "\\n\\
\\nif __name__ == '__main__':\\
\\n log = None\\
\\n try:\\
\\n print(register(log))\\
\\n except Exception as e:\\
\\n if log:\\
\\n log.error(e, exc_info=True)\\
\\n status = {'Status':'Failure','Message':str(e)}\\
\\n print(json.dumps(status))"
def addStatement(self, statement, indent=1):
self.codeText += f"\\n{self.tab * indent}{statement}"
    def query_with_quotes_code(self, desc=True, indent=1):
return """\\n{first_indentation}def __get_unprocessed_runs_sorted(self):
{indentation}query = "tags.processed = 'no'"
{indentation}runs = self.client.search_runs(
{indentation} experiment_ids=self.experiment_id,
{indentation} filter_string=query,
{indentation} run_view_type=ViewType.ACTIVE_ONLY,
{indentation} order_by=['metrics.test_score {0}']
{indentation})
{indentation}return runs\\n""".format('DESC' if desc else 'ASC', first_indentation=indent*self.tab, indentation=(1+indent)*self.tab)
def addClassCode(self, smaller_is_better=False):
self.codeText += "\\
\\nclass mlflow_register():\\
\\n\\
\\n def __init__(self, input_path, model_name, meta_data):\\
\\n self.input_path = Path(input_path).resolve()\\
\\n self.model_name = model_name\\
\\n self.meta_data = meta_data\\
\\n self.logger = logging.getLogger('ModelRegister')\\
\\n self.client = None\\
\\n self.monitoring_data = read_json(self.input_path/IOFiles['monitor'])\\
\\n mlflow_default_config = {'artifacts_uri':'','tracking_uri_type':'','tracking_uri':'','registry_uri':''}\\
\\n if not self.monitoring_data.get('mlflow_config',False):\\
\\n self.monitoring_data['mlflow_config'] = mlflow_default_config\\
\\n\\
\\n def setup_registration(self):\\
\\n tracking_uri, artifact_uri, registry_uri = get_mlflow_uris(self.monitoring_data['mlflow_config'],self.input_path)\\
\\n self.logger.info(f'MLflow tracking uri: {tracking_uri}')\\
\\n self.logger.info(f'MLflow registry uri: {registry_uri}')\\
\\n mlflow.set_tracking_uri(tracking_uri)\\
\\n mlflow.set_registry_uri(registry_uri)\\
\\n self.client = mlflow.tracking.MlflowClient(\\
\\n tracking_uri=tracking_uri,\\
\\n registry_uri=registry_uri,\\
\\n )\\
\\n self.experiment_id = self.client.get_experiment_by_name(self.model_name).experiment_id\\
\\n"
        self.codeText += self.query_with_quotes_code(not smaller_is_better)
self.codeText += "\\
\\n def __log_unprocessed_runs(self, runs):\\
\\n self.logger.info('Unprocessed runs:')\\
\\n for run in runs:\\
\\n self.logger.info(' {}: {}'.format(run.info.run_id,run.data.metrics['test_score']))\\
\\n\\
\\n def get_unprocessed_runs(self, model_path):\\
\\n unprocessed_runs = self.__get_unprocessed_runs_sorted()\\
\\n if not unprocessed_runs:\\
\\n raise ValueError('Registering fail: No new trained model')\\
\\n self.__log_unprocessed_runs( unprocessed_runs)\\
\\n return unprocessed_runs\\
\\n\\
\\n def __wait_until_ready(self, model_name, model_version):\\
\\n for _ in range(10):\\
\\n model_version_details = self.client.get_model_version(\\
\\n name=model_name,\\
\\n version=model_version,\\
\\n )\\
\\n status = ModelVersionStatus.from_string(model_version_details.status)\\
\\n if status == ModelVersionStatus.READY:\\
\\n break\\
\\n time.sleep(1)\\
\\n\\
\\n def __create_model(self, run):\\
\\n artifact_path = 'model'\\
\\n model_uri = 'runs:/{run_id}/{artifact_path}'.format(run_id=run.info.run_id, artifact_path=artifact_path)\\
\\n self.logger.info(f'Registering model (run id): {run.info.run_id}')\\
\\n model_details = mlflow.register_model(model_uri=model_uri, name=self.model_name)\\
\\n self.__wait_until_ready(model_details.name, model_details.version)\\
\\n self.client.set_tag(run.info.run_id, 'registered', 'yes' )\\
\\n state_transition = self.client.transition_model_version_stage(\\
\\n name=model_details.name,\\
\\n version=model_details.version,\\
\\n stage='Production',\\
\\n )\\
\\n self.logger.info(state_transition)\\
\\n return model_details\\
\\n\\
\\n def get_best_run(self, models):\\
\\n return models[0]\\
\\n\\
\\n def __validate_config(self):\\
\\n try:\\
\\n load_data_loc = self.meta_data['load_data']['Status']['DataFilePath']\\
\\n except KeyError:\\
\\n raise ValueError('DataIngestion step output is corrupted')\\
\\n\\
\\n def __mlflow_log_transformer_steps(self, best_run):\\
\\n run_id = best_run.info.run_id\\
\\n meta_data = read_json(self.input_path/(best_run.data.tags['mlflow.runName']+'_'+IOFiles['metaData']))\\
\\n self.__validate_config()\\
\\n with mlflow.start_run(run_id):\\
\\n if 'transformation' in meta_data.keys():\\
\\n if 'target_encoder' in meta_data['transformation'].keys():\\
\\n source_loc = meta_data['transformation']['target_encoder']\\
\\n mlflow.log_artifact(str(self.input_path/source_loc))\\
\\n meta_data['transformation']['target_encoder'] = Path(source_loc).name\\
\\n if 'preprocessor' in meta_data['transformation'].keys():\\
\\n source_loc = meta_data['transformation']['preprocessor']\\
\\n mlflow.log_artifact(str(self.input_path/source_loc))\\
\\n meta_data['transformation']['preprocessor'] = Path(source_loc).name\\
\\n\\
\\n write_json(meta_data, self.input_path/IOFiles['metaData'])\\
\\n mlflow.log_artifact(str(self.input_path/IOFiles['metaData']))\\
\\n\\
\\n def __update_processing_tag(self, processed_runs):\\
\\n self.logger.info('Changing status to processed:')\\
\\n for run in processed_runs:\\
\\n self.client.set_tag(run.info.run_id, 'processed', 'yes')\\
\\n self.logger.info(f' run id: {run.info.run_id}')\\
\\n\\
\\n def update_unprocessed(self, runs):\\
\\n return self.__update_processing_tag( runs)\\
\\n\\
\\n def __force_register(self, best_run):\\
\\n self.__create_model( best_run)\\
\\n self.__mlflow_log_transformer_steps( best_run)\\
\\n production_json = self.input_path/IOFiles['production']\\
\\n production_model = {'Model':best_run.data.tags['mlflow.runName'],'runNo':self.monitoring_data['runNo'],'score':best_run.data.metrics['test_score']}\\
\\n write_json(production_model, production_json)\\
\\n database_path = self.input_path/(self.input_path.stem + '.db')\\
\\n if database_path.exists():\\
\\n database_path.unlink()\\
\\n return best_run.data.tags['mlflow.runName']\\
\\n\\
\\n def __get_register_model_score(self):\\
\\n reg = self.client.list_registered_models()\\
\\n if not reg:\\
\\n return '', 0\\
\\n run_id = reg[0].latest_versions[0].run_id\\
\\n run = self.client.get_run(run_id)\\
\\n score = run.data.metrics['test_score']\\
\\n return run_id, score\\
\\n\\
\\n def register_model(self, models, best_run):\\
\\n return self.__force_register(best_run)"
def local_functions_code(self, smaller_is_better=True, indent=1):
if smaller_is_better:
min_max = 'min'
else:
min_max = 'max'
self.codeText += "\\ndef validate_config(deploy_dict):\\
\\n try:\\
\\n load_data_loc = deploy_dict['load_data']['Status']['DataFilePath']\\
\\n except KeyError:\\
\\n raise ValueError('DataIngestion step output is corrupted')\\
\\n\\
\\ndef get_digest(fname):\\
\\n import hashlib\\
\\n hash_algo = hashlib.sha256()\\
\\n with open(fname, 'rb') as f:\\
\\n for chunk in iter(lambda: f.read(2 ** 20), b''):\\
\\n hash_algo.update(chunk)\\
\\n return hash_algo.hexdigest()\\
\\n"
def getCode(self, indent=1):
return self.function_code + '\\n' + self.codeText
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from .imports import importModule
utility_functions = {
'load_data': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'],
'transformer': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'],
'selector': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'],
'train': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'],
'register': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'],
'Prediction': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'],
'drift': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'],
}
#TODO: convert read and write functions into class functions
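# Each entry maps a helper name to the imports it needs and the source text
# that is emitted verbatim into the generated script.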
functions_code = {
'read_json':{'imports':[{'mod':'json'}],'code':"\\n\\
\\ndef read_json(file_path):\\
\\n data = None\\
\\n with open(file_path,'r') as f:\\
\\n data = json.load(f)\\
\\n return data\\
\\n"},
'write_json':{'imports':[{'mod':'json'}],'code':"\\n\\
\\ndef write_json(data, file_path):\\
\\n with open(file_path,'w') as f:\\
\\n json.dump(data, f)\\
\\n"},
'read_data':{'imports':[{'mod':'pandas','mod_as':'pd'}],'code':"\\n\\
\\ndef read_data(file_path, encoding='utf-8', sep=','):\\
\\n return pd.read_csv(file_path, encoding=encoding, sep=sep)\\
\\n"},
'write_data':{'imports':[{'mod':'pandas','mod_as':'pd'}],'code':"\\n\\
\\ndef write_data(data, file_path, index=False):\\
\\n return data.to_csv(file_path, index=index)\\
\\n\\
\\n#Uncomment and change below code for google storage\\
\\n#from google.cloud import storage\\
\\n#def write_data(data, file_path, index=False):\\
\\n# file_name= file_path.name\\
\\n# data.to_csv('output_data.csv')\\
\\n# storage_client = storage.Client()\\
\\n# bucket = storage_client.bucket('aion_data')\\
\\n# bucket.blob('prediction/'+file_name).upload_from_filename('output_data.csv', content_type='text/csv')\\
\\n# return data\\
\\n"},
'is_file_name_url':{'imports':[],'code':"\\n\\
\\ndef is_file_name_url(file_name):\\
\\n supported_urls_starts_with = ('gs://','https://','http://')\\
\\n return file_name.startswith(supported_urls_starts_with)\\
\\n"},
'logger_class':{'imports':[{'mod':'logging'}, {'mod':'io'}],'code':"\\n\\
\\nclass logger():\\
\\n #setup the logger\\
\\n def __init__(self, log_file, mode='w', logger_name=None):\\
\\n logging.basicConfig(filename=log_file, filemode=mode, format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S')\\
\\n self.log = logging.getLogger(logger_name)\\
\\n\\
\\n #get logger\\
\\n def getLogger(self):\\
\\n return self.log\\
\\n\\
\\n def info(self, msg):\\
\\n self.log.info(msg)\\
\\n\\
\\n def error(self, msg, exc_info=False):\\
\\n self.log.error(msg, exc_info=exc_info)\\
\\n\\
\\n # format and log dataframe\\
\\n def log_dataframe(self, df, rows=2, msg=None):\\
\\n buffer = io.StringIO()\\
\\n df.info(buf=buffer)\\
\\n log_text = 'Data frame{}'.format(' after ' + msg + ':' if msg else ':')\\
\\n log_text += '\\\\n\\\\t'+str(df.head(rows)).replace('\\\\n','\\\\n\\\\t')\\
\\n log_text += ('\\\\n\\\\t' + buffer.getvalue().replace('\\\\n','\\\\n\\\\t'))\\
\\n self.log.info(log_text)\\
\\n"},
}
class utility_function():
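"""Assembles the utility helper functions (see utility_functions above) needed by a generated pipeline module, together with the imports they require."""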
def __init__(self, module):
if module in utility_functions.keys():
self.module_name = module
else:
self.module_name = None
self.importer = importModule()
self.codeText = ""
def get_code(self):
code = ""
if self.module_name:
functions = utility_functions[self.module_name]
for function in functions:
self.codeText += self.get_function_code(function)
code = self.importer.getCode()
code += self.codeText
return code
def get_function_code(self, name):
code = ""
if name in functions_code.keys():
code += functions_code[name]['code']
if self.importer:
if 'imports' in functions_code[name].keys():
for module in functions_code[name]['imports']:
mod_name = module['mod']
mod_from = module.get('mod_from', None)
mod_as = module.get('mod_as', None)
self.importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as)
return code
def get_importer(self):
return self.importer
if __name__ == '__main__':
obj = utility_function('load_data')
p = obj.get_code()
print(p)<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from .imports import importModule
from .load_data import tabularDataReader
from .transformer import transformer as profiler
from .transformer import data_profiler
from .selector import selector
from .trainer import learner
from .register import register
from .deploy import deploy
from .drift_analysis import drift
from .functions import global_function
from .data_reader import data_reader
from .utility import utility_function
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class deploy():
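"""Code generator for the deployment stage: emits the runtime deploy class plus the groundtruth uploader and HTTP service scripts."""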
def __init__(self, target_encoder=False, feature_reducer=False, score_smaller_is_better=True, tab_size=4):
self.tab = ' ' * tab_size
self.codeText = "\\n\\n\\
\\nclass deploy():\\
\\n\\
\\n def __init__(self, base_config, log=None):\\
\\n self.targetPath = (Path('aion')/base_config['targetPath']).resolve()\\
\\n if log:\\
\\n self.logger = log\\
\\n else:\\
\\n log_file = self.targetPath/IOFiles['log']\\
\\n self.logger = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)\\
\\n try:\\
\\n self.initialize(base_config)\\
\\n except Exception as e:\\
\\n self.logger.error(e, exc_info=True)\\
\\n\\
\\n def initialize(self, base_config):\\
\\n self.usecase = base_config['targetPath']\\
\\n monitoring_data = read_json(self.targetPath/IOFiles['monitor'])\\
\\n self.prod_db_type = monitoring_data['prod_db_type']\\
\\n self.db_config = monitoring_data['db_config']\\
\\n mlflow_default_config = {'artifacts_uri':'','tracking_uri_type':'','tracking_uri':'','registry_uri':''}\\
\\n tracking_uri, artifact_uri, registry_uri = get_mlflow_uris(monitoring_data.get('mlflow_config',mlflow_default_config), self.targetPath)\\
\\n mlflow.tracking.set_tracking_uri(tracking_uri)\\
\\n mlflow.tracking.set_registry_uri(registry_uri)\\
\\n client = mlflow.tracking.MlflowClient()\\
\\n self.model_version = client.get_latest_versions(self.usecase, stages=['production'] )\\
\\n model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)\\
\\n self.model = mlflow.pyfunc.load_model(model_version_uri)\\
\\n run = client.get_run(self.model.metadata.run_id)\\
\\n if run.info.artifact_uri.startswith('file:'): #remove file:///\\
\\n skip_name = 'file:'\\
\\n if run.info.artifact_uri.startswith('file:///'):\\
\\n skip_name = 'file:///'\\
\\n self.artifact_path = Path(run.info.artifact_uri[len(skip_name) : ])\\
\\n self.artifact_path_type = 'file'\\
\\n meta_data = read_json(self.artifact_path/IOFiles['metaData'])\\
\\n else:\\
\\n self.artifact_path = run.info.artifact_uri\\
\\n self.artifact_path_type = 'url'\\
\\n meta_data_file = mlflow.artifacts.download_artifacts(self.artifact_path+'/'+IOFiles['metaData'])\\
\\n meta_data = read_json(meta_data_file)\\
\\n self.selected_features = meta_data['load_data']['selected_features']\\
\\n self.train_features = meta_data['training']['features']"
if target_encoder:
self.codeText += "\\
\\n if self.artifact_path_type == 'url':\\
\\n preprocessor_file = mlflow.artifacts.download_artifacts(self.artifact_path+'/'+meta_data['transformation']['preprocessor'])\\
\\n target_encoder_file = mlflow.artifacts.download_artifacts(self.artifact_path+'/'+meta_data['transformation']['target_encoder'])\\
\\n else:\\
\\n preprocessor_file = self.artifact_path/meta_data['transformation']['preprocessor']\\
\\n target_encoder_file = self.artifact_path/meta_data['transformation']['target_encoder']\\
\\n self.target_encoder = joblib.load(target_encoder_file)"
else:
self.codeText += "\\
\\n if self.artifact_path_type == 'url':\\
\\n preprocessor_file = mlflow.artifacts.download_artifacts(self.artifact_path+'/'+meta_data['transformation']['preprocessor'])\\
\\n else:\\
\\n preprocessor_file = self.artifact_path/meta_data['transformation']['preprocessor']"
self.codeText += "\\
\\n self.preprocessor = joblib.load(preprocessor_file)\\
\\n self.preprocess_out_columns = meta_data['transformation']['preprocess_out_columns']\\
"
if feature_reducer:
self.codeText += "\\
\\n if self.artifact_path_type == 'url':\\
\\n feature_reducer_file = mlflow.artifacts.download_artifacts(self.artifact_path+'/'+meta_data['featureengineering']['feature_reducer']['file'])\\
\\n else:\\
\\n feature_reducer_file = self.artifact_path/meta_data['featureengineering']['feature_reducer']['file']\\
\\n self.feature_reducer = joblib.load(feature_reducer_file)\\
\\n self.feature_reducer_cols = meta_data['featureengineering']['feature_reducer']['features']"
self.codeText +="\\n\\
\\n def write_to_db(self, data):\\
\\n prod_file = IOFiles['prodData']\\
\\n writer = dataReader(reader_type=self.prod_db_type,target_path=self.targetPath, config=self.db_config )\\
\\n writer.write(data, prod_file)\\
\\n writer.close()\\
\\n\\
\\n def predict(self, data=None):\\
\\n try:\\
\\n return self.__predict(data)\\
\\n except Exception as e:\\
\\n if self.logger:\\
\\n self.logger.error(e, exc_info=True)\\
\\n raise ValueError(json.dumps({'Status':'Failure', 'Message': str(e)}))\\
\\n\\
\\n def __predict(self, data=None):\\
\\n df = pd.DataFrame()\\
\\n jsonData = json.loads(data)\\
\\n df = pd.json_normalize(jsonData)\\
\\n if len(df) == 0:\\
\\n raise ValueError('No data record found')\\
\\n missing_features = [x for x in self.selected_features if x not in df.columns]\\
\\n if missing_features:\\
\\n raise ValueError(f'The following features are missing: {missing_features}')\\
\\n df_copy = df.copy()\\
\\n df = df[self.selected_features]\\
\\n df = self.preprocessor.transform(df)\\
\\n if isinstance(df, scipy.sparse.spmatrix):\\
\\n df = df.toarray()\\
\\n df = pd.DataFrame(df, columns=self.preprocess_out_columns)"
if feature_reducer:
self.codeText += "\\n df = self.feature_reducer.transform(df[self.feature_reducer_cols])"
else:
self.codeText += "\\n df = df[self.train_features]"
if target_encoder:
self.codeText += "\\n df = df.astype(np.float32)\\
\\n output = pd.DataFrame(self.model._model_impl.predict_proba(df), columns=self.target_encoder.classes_)\\
\\n df_copy['prediction'] = output.idxmax(axis=1)\\
\\n self.write_to_db(df_copy)\\
\\n df_copy['probability'] = output.max(axis=1).round(2)\\
\\n df_copy['remarks'] = output.apply(lambda x: x.to_json(), axis=1)\\
\\n output = df_copy.to_json(orient='records')"
else:
self.codeText += "\\n output = self.model._model_impl.predict(df).reshape(1, -1)[0].round(2)\\
\\n df_copy['prediction'] = output\\
\\n self.write_to_db(df_copy)\\
\\n output = df_copy.to_json(orient='records')"
self.codeText += "\\n return output"
self.input_files = {}
self.output_files = {}
self.addInputFiles({'inputData' : 'rawData.dat', 'metaData' : 'modelMetaData.json', 'performance' : 'performance.json','monitor':'monitoring.json','log':'predict.log','prodData':'prodData'})
def addInputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.input_files[k] = v
def addOutputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.output_files[k] = v
def getInputFiles(self):
text = 'IOFiles = '
if not self.input_files:
text += '{ }'
else:
text += json.dumps(self.input_files, indent=4)
return text
def getOutputFiles(self):
text = 'output_file = '
if not self.output_files:
text += '{ }'
else:
text += json.dumps(self.output_files, indent=4)
return text
def getInputOutputFiles(self, indent=0):
text = '\\n'
text += self.getInputFiles()
text += '\\n'
text += self.getOutputFiles()
if indent:
text = text.replace('\\n', self.tab * indent + '\\n')
return text
def addStatement(self, statement, indent=1):
pass
def getCode(self):
return self.codeText
def getGroundtruthCode(self):
return """
import sys
import math
import json
import sqlite3
import pandas as pd
from datetime import datetime
from pathlib import Path
import platform
from utility import *
from data_reader import dataReader
IOFiles = {
"monitoring":"monitoring.json",
"prodDataGT":"prodDataGT"
}
class groundtruth():
def __init__(self, base_config):
self.targetPath = Path('aion')/base_config['targetPath']
data = read_json(self.targetPath/IOFiles['monitoring'])
self.prod_db_type = data['prod_db_type']
self.db_config = data['db_config']
def actual(self, data=None):
df = pd.DataFrame()
jsonData = json.loads(data)
df = pd.json_normalize(jsonData)
if len(df) == 0:
raise ValueError('No data record found')
self.write_to_db(df)
status = {'Status':'Success','Message':'uploaded'}
return json.dumps(status)
def write_to_db(self, data):
prod_file = IOFiles['prodDataGT']
writer = dataReader(reader_type=self.prod_db_type, target_path=self.targetPath, config=self.db_config )
writer.write(data, prod_file)
writer.close()
"""
def getServiceCode(self):
return """
from http.server import BaseHTTPRequestHandler,HTTPServer
from socketserver import ThreadingMixIn
import os
from os.path import expanduser
import platform
import threading
import subprocess
import argparse
import re
import cgi
import json
import shutil
import logging
import sys
import time
import seaborn as sns
from pathlib import Path
from predict import deploy
from groundtruth import groundtruth
import pandas as pd
import scipy.stats as st
import numpy as np
import warnings
from utility import *
from data_reader import dataReader
warnings.filterwarnings("ignore")
config_input = None
IOFiles = {
"inputData": "rawData.dat",
"metaData": "modelMetaData.json",
"production": "production.json",
"log": "aion.log",
"monitoring":"monitoring.json",
"prodData": "prodData",
"prodDataGT":"prodDataGT"
}
def DistributionFinder(data):
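# Find the best fitting discrete or continuous distribution for a numeric
# column by minimising the sum of squared errors against candidate
# scipy.stats distributions; returns (distribution_name, sse).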
try:
distributionName = ""
sse = 0.0
KStestStatic = 0.0
dataType = ""
if (data.dtype == "float64" or data.dtype == "float32"):
dataType = "Continuous"
elif (data.dtype == "int"):
dataType = "Discrete"
elif (data.dtype == "int64"):
dataType = "Discrete"
if (dataType == "Discrete"):
distributions = [st.bernoulli, st.binom, st.geom, st.nbinom, st.poisson]
index, counts = np.unique(data.astype(int), return_counts=True)
if (len(index) >= 2):
best_sse = np.inf
y1 = []
total = sum(counts)
mean = float(sum(index * counts)) / total
variance = float((sum(index ** 2 * counts) - total * mean ** 2)) / (total - 1)
dispersion = mean / float(variance)
theta = 1 / float(dispersion)
r = mean * (float(theta) / (1 - theta))
for j in counts:
y1.append(float(j) / total)
pmf1 = st.bernoulli.pmf(index, mean)
pmf2 = st.binom.pmf(index, len(index), p=mean / len(index))
pmf3 = st.geom.pmf(index, 1 / float(1 + mean))
pmf4 = st.nbinom.pmf(index, mean, r)
pmf5 = st.poisson.pmf(index, mean)
sse1 = np.sum(np.power(y1 - pmf1, 2.0))
sse2 = np.sum(np.power(y1 - pmf2, 2.0))
sse3 = np.sum(np.power(y1 - pmf3, 2.0))
sse4 = np.sum(np.power(y1 - pmf4, 2.0))
sse5 = np.sum(np.power(y1 - pmf5, 2.0))
sselist = [sse1, sse2, sse3, sse4, sse5]
best_distribution = 'NA'
for i in range(0, len(sselist)):
if best_sse > sselist[i] > 0:
best_distribution = distributions[i].name
best_sse = sselist[i]
elif (len(index) == 1):
best_distribution = "Constant Data-No Distribution"
best_sse = 0.0
distributionName = best_distribution
sse = best_sse
elif (dataType == "Continuous"):
distributions = [st.uniform, st.expon, st.weibull_max, st.weibull_min, st.chi, st.norm, st.lognorm, st.t,
st.gamma, st.beta]
best_distribution = st.norm.name
best_sse = np.inf
datamin = data.min()
datamax = data.max()
nrange = datamax - datamin
y, x = np.histogram(data.astype(float), bins='auto', density=True)
x = (x + np.roll(x, -1))[:-1] / 2.0
for distribution in distributions:
params = distribution.fit(data.astype(float))
arg = params[:-2]
loc = params[-2]
scale = params[-1]
pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
sse = np.sum(np.power(y - pdf, 2.0))
if (best_sse > sse > 0):
best_distribution = distribution.name
best_sse = sse
distributionName = best_distribution
sse = best_sse
except:
response = str(sys.exc_info()[0])
message = 'Job has Failed' + response
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
print(message)
return distributionName, sse
def getDriftDistribution(feature, dataframe, newdataframe=pd.DataFrame()):
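# Plot each feature's fitted distribution for the training data and, when
# provided, the production data; returns the chart as a URL-encoded base64
# PNG for embedding in the metrics HTML page.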
import matplotlib.pyplot as plt
import math
import io, base64, urllib
np.seterr(divide='ignore', invalid='ignore')
try:
plt.clf()
except:
pass
plt.rcParams.update({'figure.max_open_warning': 0})
sns.set(color_codes=True)
pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
if len(feature) > 4:
numberoffeatures = len(feature)
plt.figure(figsize=(10, numberoffeatures*2))
else:
plt.figure(figsize=(10,5))
for i in enumerate(feature):
dataType = dataframe[i[1]].dtypes
if dataType not in pandasNumericDtypes:
dataframe[i[1]] = pd.Categorical(dataframe[i[1]])
dataframe[i[1]] = dataframe[i[1]].cat.codes
dataframe[i[1]] = dataframe[i[1]].astype(int)
dataframe[i[1]] = dataframe[i[1]].fillna(dataframe[i[1]].mode()[0])
else:
dataframe[i[1]] = dataframe[i[1]].fillna(dataframe[i[1]].mean())
plt.subplots_adjust(hspace=0.5, wspace=0.7, top=1)
plt.subplot(math.ceil((len(feature) / 2)), 2, i[0] + 1)
distname, sse = DistributionFinder(dataframe[i[1]])
print(distname)
ax = sns.distplot(dataframe[i[1]], label=distname)
ax.legend(loc='best')
if newdataframe.empty == False:
dataType = newdataframe[i[1]].dtypes
if dataType not in pandasNumericDtypes:
newdataframe[i[1]] = pd.Categorical(newdataframe[i[1]])
newdataframe[i[1]] = newdataframe[i[1]].cat.codes
newdataframe[i[1]] = newdataframe[i[1]].astype(int)
newdataframe[i[1]] = newdataframe[i[1]].fillna(newdataframe[i[1]].mode()[0])
else:
newdataframe[i[1]] = newdataframe[i[1]].fillna(newdataframe[i[1]].mean())
distname, sse = DistributionFinder(newdataframe[i[1]])
print(distname)
ax = sns.distplot(newdataframe[i[1]],label=distname)
ax.legend(loc='best')
buf = io.BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
string = base64.b64encode(buf.read())
uri = urllib.parse.quote(string)
return uri
def read_json(file_path):
data = None
with open(file_path,'r') as f:
data = json.load(f)
return data
class HTTPRequestHandler(BaseHTTPRequestHandler):
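# POST serves the /predict, /groundtruth and /delete operations; GET serves
# /metrices, /logs and a plain-text help page describing the endpoints.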
def do_POST(self):
print('PYTHON ######## REQUEST ####### STARTED')
if re.search('/AION/', self.path) or re.search('/aion/', self.path):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
if ctype == 'application/json':
length = int(self.headers.get('content-length'))
data = self.rfile.read(length)
usecase = self.path.split('/')[-2]
if usecase.lower() == config_input['targetPath'].lower():
operation = self.path.split('/')[-1]
data = json.loads(data)
dataStr = json.dumps(data)
if operation.lower() == 'predict':
output=deployobj.predict(dataStr)
resp = output
elif operation.lower() == 'groundtruth':
gtObj = groundtruth(config_input)
output = gtObj.actual(dataStr)
resp = output
elif operation.lower() == 'delete':
targetPath = Path('aion')/config_input['targetPath']
for file in data:
x = targetPath/file
if x.exists():
os.remove(x)
resp = json.dumps({'Status':'Success'})
else:
outputStr = json.dumps({'Status':'Error','Msg':'Operation not supported'})
resp = outputStr
else:
outputStr = json.dumps({'Status':'Error','Msg':'Wrong URL'})
resp = outputStr
else:
outputStr = json.dumps({'Status':'ERROR','Msg':'Content-Type Not Present'})
resp = outputStr
resp=resp+'\\\\n'
resp=resp.encode()
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(resp)
else:
print('python ==> else1')
self.send_response(403)
self.send_header('Content-Type', 'application/json')
self.end_headers()
print('PYTHON ######## REQUEST ####### ENDED')
return
def do_GET(self):
print('PYTHON ######## REQUEST ####### STARTED')
if re.search('/AION/', self.path) or re.search('/aion/', self.path):
usecase = self.path.split('/')[-2]
self.send_response(200)
self.targetPath = Path('aion')/config_input['targetPath']
meta_data_file = self.targetPath/IOFiles['metaData']
if meta_data_file.exists():
meta_data = read_json(meta_data_file)
else:
raise ValueError(f'Configuration file not found: {meta_data_file}')
production_file = self.targetPath/IOFiles['production']
if production_file.exists():
production_data = read_json(production_file)
else:
raise ValueError(f'Production Details not found: {production_file}')
operation = self.path.split('/')[-1]
if (usecase.lower() == config_input['targetPath'].lower()) and (operation.lower() == 'metrices'):
self.send_header('Content-Type', 'text/html')
self.end_headers()
ModelString = production_data['Model']
ModelPerformance = ModelString+'_performance.json'
performance_file = self.targetPath/ModelPerformance
if performance_file.exists():
performance_data = read_json(performance_file)
else:
raise ValueError(f'Performance Details not found: {performance_file}')
Scoring_Creteria = performance_data['scoring_criteria']
train_score = round(performance_data['metrices']['train_score'],2)
test_score = round(performance_data['metrices']['test_score'],2)
current_score = 'NA'
monitoring = read_json(self.targetPath/IOFiles['monitoring'])
reader = dataReader(reader_type=monitoring['prod_db_type'],target_path=self.targetPath, config=monitoring['db_config'])
inputDatafile = self.targetPath/IOFiles['inputData']
NoOfPrediction = 0
NoOfGroundTruth = 0
inputdistribution = ''
if reader.file_exists(IOFiles['prodData']):
dfPredict = reader.read(IOFiles['prodData'])
dfinput = pd.read_csv(inputDatafile)
features = meta_data['training']['features']
inputdistribution = getDriftDistribution(features,dfinput,dfPredict)
NoOfPrediction = len(dfPredict)
if reader.file_exists(IOFiles['prodDataGT']):
dfGroundTruth = reader.read(IOFiles['prodDataGT'])
NoOfGroundTruth = len(dfGroundTruth)
common_col = [k for k in dfPredict.columns.tolist() if k in dfGroundTruth.columns.tolist()]
proddataDF = pd.merge(dfPredict, dfGroundTruth, on =common_col,how = 'inner')
if Scoring_Creteria.lower() == 'accuracy':
from sklearn.metrics import accuracy_score
current_score = accuracy_score(proddataDF[config_input['target_feature']], proddataDF['prediction'])
current_score = round((current_score*100),2)
elif Scoring_Creteria.lower() == 'recall':
from sklearn.metrics import recall_score
current_score = recall_score(proddataDF[config_input['target_feature']], proddataDF['prediction'],average='macro')
current_score = round((current_score*100),2)
msg = \\"""<html>
<head>
<title>Performance Details</title>
</head>
<style>
table, th, td {border}
</style>
<body>
<h2><b>Deployed Model:</b>{ModelString}</h2>
<br/>
<table style="width:50%">
<tr>
<td>No of Prediction</td>
<td>{NoOfPrediction}</td>
</tr>
<tr>
<td>No of GroundTruth</td>
<td>{NoOfGroundTruth}</td>
</tr>
</table>
<br/>
<table style="width:100%">
<tr>
<th>Score Type</th>
<th>Train Score</th>
<th>Test Score</th>
<th>Production Score</th>
</tr>
<tr>
<td>{Scoring_Creteria}</td>
<td>{train_score}</td>
<td>{test_score}</td>
<td>{current_score}</td>
</tr>
</table>
<br/>
<br/>
<img src="data:image/png;base64,{newDataDrift}" alt="" >
</body>
</html>
\\""".format(border='{border: 1px solid black;}',ModelString=ModelString,Scoring_Creteria=Scoring_Creteria,NoOfPrediction=NoOfPrediction,NoOfGroundTruth=NoOfGroundTruth,train_score=train_score,test_score=test_score,current_score=current_score,newDataDrift=inputdistribution)
elif (usecase.lower() == config_input['targetPath'].lower()) and (operation.lower() == 'logs'):
self.send_header('Content-Type', 'text/plain')
self.end_headers()
log_file = self.targetPath/IOFiles['log']
if log_file.exists():
with open(log_file) as f:
msg = f.read()
else:
raise ValueError(f'Log Details not found: {log_file}')
else:
self.send_header('Content-Type', 'application/json')
self.end_headers()
features = meta_data['load_data']['selected_features']
bodydes='['
for x in features:
if bodydes != '[':
bodydes = bodydes+','
bodydes = bodydes+'{"'+x+'":"value"}'
bodydes+=']'
urltext = '/AION/'+config_input['targetPath']+'/predict'
urltextgth='/AION/'+config_input['targetPath']+'/groundtruth'
urltextproduction='/AION/'+config_input['targetPath']+'/metrices'
msg=\\"""
Version:{modelversion}
RunNo: {runNo}
URL for Prediction
==================
URL:{url}
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
Output: prediction, probability (if applicable), and remarks for each row.
URL for GroundTruth
===================
URL:{urltextgth}
RequestType: POST
Content-Type=application/json
Note: Ensure that one feature (an ID) is unique in both the predict and groundtruth data; otherwise output drift detection will not work
URL for Model In Production Analysis
====================================
URL:{urltextproduction}
RequestType: GET
Content-Type=application/json
\\""".format(modelversion=config_input['modelVersion'],runNo=config_input['deployedRunNo'],url=urltext,urltextgth=urltextgth,urltextproduction=urltextproduction,displaymsg=bodydes)
self.wfile.write(msg.encode())
else:
self.send_response(403)
self.send_header('Content-Type', 'application/json')
self.end_headers()
return
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
allow_reuse_address = True
def shutdown(self):
self.socket.close()
HTTPServer.shutdown(self)
class file_status():
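# Polls the production details file every 30 seconds and reloads the
# deployed model when the file's modification time changes.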
def __init__(self, reload_function, params, file, logger):
self.files_status = {}
self.initializeFileStatus(file)
self.reload_function = reload_function
self.params = params
self.logger = logger
def initializeFileStatus(self, file):
self.files_status = {'path': file, 'time':file.stat().st_mtime}
def is_file_changed(self):
if self.files_status['path'].stat().st_mtime > self.files_status['time']:
self.files_status['time'] = self.files_status['path'].stat().st_mtime
return True
return False
def run(self):
global config_input
while( True):
time.sleep(30)
if self.is_file_changed():
production_details = targetPath/IOFiles['production']
if not production_details.exists():
raise ValueError(f'Model in production details does not exist')
productionmodel = read_json(production_details)
config_file = Path(__file__).parent/'config.json'
if not Path(config_file).exists():
raise ValueError(f'Config file is missing: {config_file}')
config_input = read_json(config_file)
config_input['deployedModel'] = productionmodel['Model']
config_input['deployedRunNo'] = productionmodel['runNo']
self.logger.info('Model changed Reloading.....')
self.logger.info(f'Model: {config_input["deployedModel"]}')
self.logger.info(f'Version: {str(config_input["modelVersion"])}')
self.logger.info(f'runNo: {str(config_input["deployedRunNo"])}')
self.reload_function(config_input)
class SimpleHttpServer():
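# Runs the threaded HTTP server and the production-file watcher in
# separate threads.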
def __init__(self, ip, port, model_file_path,reload_function,params, logger):
self.server = ThreadedHTTPServer((ip,port), HTTPRequestHandler)
self.status_checker = file_status( reload_function, params, model_file_path, logger)
def start(self):
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
self.status_thread = threading.Thread(target=self.status_checker.run)
self.status_thread.start()
def waitForThread(self):
self.server_thread.join()
self.status_thread.join()
def stop(self):
self.server.shutdown()
self.waitForThread()
if __name__=='__main__':
parser = argparse.ArgumentParser(description='HTTP Server')
parser.add_argument('-ip','--ipAddress', help='HTTP Server IP')
parser.add_argument('-pn','--portNo', type=int, help='Listening port for HTTP Server')
args = parser.parse_args()
config_file = Path(__file__).parent/'config.json'
if not Path(config_file).exists():
raise ValueError(f'Config file is missing: {config_file}')
config = read_json(config_file)
if args.ipAddress:
config['ipAddress'] = args.ipAddress
if args.portNo:
config['portNo'] = args.portNo
targetPath = Path('aion')/config['targetPath']
if not targetPath.exists():
raise ValueError(f'targetPath does not exist')
production_details = targetPath/IOFiles['production']
if not production_details.exists():
raise ValueError(f'Model in production details does not exist')
productionmodel = read_json(production_details)
config['deployedModel'] = productionmodel['Model']
config['deployedRunNo'] = productionmodel['runNo']
#server = SimpleHttpServer(config['ipAddress'],int(config['portNo']))
config_input = config
logging.basicConfig(filename= Path(targetPath)/IOFiles['log'], filemode='a', format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S') |
logger = logging.getLogger(Path(__file__).parent.name)
deployobj = deploy(config_input, logger)
server = SimpleHttpServer(config['ipAddress'],int(config['portNo']),targetPath/IOFiles['production'],deployobj.initialize,config_input, logger)
logger.info('HTTP Server Running...........')
logger.info(f"IP Address: {config['ipAddress']}")
logger.info(f"Port No.: {config['portNo']}")
print('HTTP Server Running...........')
print('For Prediction')
print('================')
print('Request Type: Post')
print('Content-Type: application/json')
print('URL: /AION/'+config['targetPath']+'/predict')
print('\\\\nFor GroundTruth')
print('================')
print('Request Type: Post')
print('Content-Type: application/json')
print('URL: /AION/'+config['targetPath']+'/groundtruth')
print('\\\\nFor Help')
print('================')
print('Request Type: Get')
print('Content-Type: application/json')
print('URL: /AION/'+config['targetPath']+'/help')
print('\\\\nFor Model In Production Analysis')
print('================')
print('Request Type: Get')
print('Content-Type: application/json')
print('URL: /AION/'+config['targetPath']+'/metrices')
server.start()
server.waitForThread()
"""<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
class global_function():
def __init__(self, tab_size=4):
self.tab = ' ' * tab_size
self.codeText = ""
self.available_functions = {
'iqr':{'name':'iqrOutlier','code':f"\\n\\ndef iqrOutlier(df):\\
\\n{self.tab}Q1 = df.quantile(0.25)\\
\\n{self.tab}Q3 = df.quantile(0.75)\\
\\n{self.tab}IQR = Q3 - Q1\\
\\n{self.tab}index = ~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR))).any(axis=1)\\
\\n{self.tab}return index"},
'zscore':{'name':'zscoreOutlier','imports':[{'mod':'stats','mod_from':'scipy'},{'mod':'numpy'}],'code':f"\\n\\ndef zscoreOutlier(df):\\
\\n{self.tab}z = numpy.abs(stats.zscore(df))\\
\\n{self.tab}index = (z < 3).all(axis=1)\\
\\n{self.tab}return index"},
'iforest':{'name':'iforestOutlier','imports':[{'mod':'IsolationForest','mod_from':'sklearn.ensemble'}],'code':f"\\n\\ndef iforestOutlier(df):\\
\\n{self.tab}from sklearn.ensemble import IsolationForest\\
\\n{self.tab}isolation_forest = IsolationForest(n_estimators=100)\\
\\n{self.tab}isolation_forest.fit(df)\\
\\n{self.tab}y_pred_train = isolation_forest.predict(df)\\
\\n{self.tab}return y_pred_train == 1"},
'minMaxImputer':{'name':'minMaxImputer','code':f"\\n\\nclass minMaxImputer(TransformerMixin):\\
\\n{self.tab}def __init__(self, strategy='max'):\\
\\n{self.tab}{self.tab}self.strategy = strategy\\
\\n{self.tab}def fit(self, X, y=None):\\
\\n{self.tab}{self.tab}self.feature_names_in_ = X.columns\\
\\n{self.tab}{self.tab}if self.strategy == 'min':\\
\\n{self.tab}{self.tab}{self.tab}self.statistics_ = X.min()\\
\\n{self.tab}{self.tab}else:\\
\\n{self.tab}{self.tab}{self.tab}self.statistics_ = X.max()\\
\\n{self.tab}{self.tab}return self\\
\\n{self.tab}def transform(self, X):\\
\\n{self.tab}{self.tab}import numpy\\
\\n{self.tab}{self.tab}return numpy.where(X.isna(), self.statistics_, X)"},
'DummyEstimator':{'name':'DummyEstimator','code':f"\\n\\nclass DummyEstimator(BaseEstimator):\\
\\n{self.tab}def fit(self): pass\\
\\n{self.tab}def score(self): pass"},
'start_reducer':{'name':'start_reducer','imports':[{'mod':'itertools'},{'mod':'numpy','mod_as':'np'},{'mod':'pandas','mod_as':'pd'},{'mod':'VarianceThreshold','mod_from':'sklearn.feature_selection'}], 'code':"""
def start_reducer(df,target_feature,corr_threshold=0.85,var_threshold=0.05):
qconstantColumns = []
train_features = df.columns.tolist()
train_features.remove(target_feature)
df = df.loc[:, (df != df.iloc[0]).any()] #remove constant feature
numeric_features = df.select_dtypes(include='number').columns.tolist()
non_numeric_features = df.select_dtypes(exclude='number').columns.tolist()
if numeric_features and var_threshold:
qconstantFilter = VarianceThreshold(threshold=var_threshold)
tempDf=df[numeric_features]
qconstantFilter.fit(tempDf)
qconstantColumns = [column for column in numeric_features if column not in tempDf.columns[qconstantFilter.get_support()]]
if target_feature in qconstantColumns:
qconstantColumns.remove(target_feature)
numeric_features = list(set(numeric_features) - set(qconstantColumns))
if numeric_features:
numColPairs = list(itertools.product(numeric_features, numeric_features))
numColPairs = [item for item in numColPairs if item[0] != item[1]] # drop self-pairs; removing items while iterating would skip elements
tempArray = []
for item in numColPairs:
tempCorr = np.abs(df[item[0]].corr(df[item[1]]))
if(tempCorr > corr_threshold):
tempArray.append(item[0])
tempArray = np.unique(tempArray).tolist()
nonsimilarNumericalCols = list(set(numeric_features) - set(tempArray))
groupedFeatures = []
if tempArray:
corrDic = {}
for feature in tempArray:
temp = []
for col in tempArray:
tempCorr = np.abs(df[feature].corr(df[col]))
temp.append(tempCorr)
corrDic[feature] = temp
#Similar correlation df
corrDF = pd.DataFrame(corrDic,index = tempArray)
corrDF.loc[:,:] = np.tril(corrDF, k=-1)
alreadyIn = set()
similarFeatures = []
for col in corrDF:
perfectCorr = corrDF[col][corrDF[col] > corr_threshold].index.tolist()
if perfectCorr and col not in alreadyIn:
alreadyIn.update(set(perfectCorr))
perfectCorr.append(col)
similarFeatures.append(perfectCorr)
updatedSimFeatures = []
for items in similarFeatures:
if(target_feature != '' and target_feature in items):
for p in items:
updatedSimFeatures.append(p)
else:
updatedSimFeatures.append(items[0])
newTempFeatures = list(set(updatedSimFeatures + nonsimilarNumericalCols))
updatedFeatures = list(set(newTempFeatures + non_numeric_features))
else:
updatedFeatures = list(set(df.columns) -set(qconstantColumns))
else:
updatedFeatures = list(set(df.columns) -set(qconstantColumns))
return updatedFeatures
"""},
'feature_importance_class':{'name':'feature_importance_class','code':"\\n\\
\\ndef feature_importance_class(df, numeric_features, cat_features,target_feature,pValTh,corrTh):\\
\\n import pandas as pd\\
\\n from sklearn.feature_selection import chi2\\
\\n from sklearn.feature_selection import f_classif\\
\\n from sklearn.feature_selection import mutual_info_classif\\
\\n \\
\\n impFeatures = []\\
\\n if cat_features:\\
\\n categoricalData=df[cat_features]\\
\\n chiSqCategorical=chi2(categoricalData,df[target_feature])[1]\\
\\n corrSeries=pd.Series(chiSqCategorical, index=cat_features)\\
\\n impFeatures.append(corrSeries[corrSeries<pValTh].index.tolist())\\
\\n if numeric_features:\\
\\n quantData=df[numeric_features]\\
\\n fclassScore=f_classif(quantData,df[target_feature])[1]\\
\\n miClassScore=mutual_info_classif(quantData,df[target_feature])\\
\\n fClassSeries=pd.Series(fclassScore,index=numeric_features)\\
\\n miClassSeries=pd.Series(miClassScore,index=numeric_features)\\
\\n impFeatures.append(fClassSeries[fClassSeries<pValTh].index.tolist())\\
\\n impFeatures.append(miClassSeries[miClassSeries>corrTh].index.tolist())\\
\\n pearsonScore=df.corr() \\
\\n targetPScore=abs(pearsonScore[target_feature])\\
\\n impFeatures.append(targetPScore[targetPScore<pValTh].index.tolist())\\
\\n return list(set(sum(impFeatures, [])))"},
'feature_importance_reg':{'name':'feature_importance_reg','code':"\\n\\
\\ndef feature_importance_reg(df, numeric_features, target_feature,pValTh,corrTh):\\
\\n import pandas as pd\\
\\n from sklearn.feature_selection import f_regression\\
\\n from sklearn.feature_selection import mutual_info_regression\\
\\n \\
\\n impFeatures = []\\
\\n if numeric_features:\\
\\n quantData =df[numeric_features]\\
\\n fregScore=f_regression(quantData,df[target_feature])[1]\\
\\n miregScore=mutual_info_regression(quantData,df[target_feature])\\
\\n fregSeries=pd.Series(fregScore,index=numeric_features)\\
\\n miregSeries=pd.Series(miregScore,index=numeric_features)\\
\\n impFeatures.append(fregSeries[fregSeries<pValTh].index.tolist())\\
\\n impFeatures.append(miregSeries[miregSeries>corrTh].index.tolist())\\
\\n pearsonScore=df.corr()\\
\\n targetPScore=abs(pearsonScore[target_feature])\\
\\n impFeatures.append(targetPScore[targetPScore<pValTh].index.tolist())\\
\\n return list(set(sum(impFeatures, [])))"},
'scoring_criteria':{'name':'scoring_criteria','imports':[{'mod':'make_scorer','mod_from':'sklearn.metrics'},{'mod':'roc_auc_score','mod_from':'sklearn.metrics'}], 'code':"\\n\\
\\ndef scoring_criteria(score_param, problem_type, class_count):\\
\\n if problem_type == 'classification':\\
\\n scorer_mapping = {\\
\\n 'recall':{'binary_class': 'recall', 'multi_class': 'recall_weighted'},\\
\\n 'precision':{'binary_class': 'precision', 'multi_class': 'precision_weighted'},\\
\\n 'f1_score':{'binary_class': 'f1', 'multi_class': 'f1_weighted'},\\
\\n 'roc_auc':{'binary_class': 'roc_auc', 'multi_class': 'roc_auc_ovr_weighted'}\\
\\n }\\
\\n if (score_param.lower() == 'roc_auc') and (class_count > 2):\\
\\n score_param = make_scorer(roc_auc_score, needs_proba=True,multi_class='ovr',average='weighted')\\
\\n else:\\
\\n class_type = 'binary_class' if class_count == 2 else 'multi_class'\\
\\n if score_param in scorer_mapping.keys():\\
\\n score_param = scorer_mapping[score_param][class_type]\\
\\n else:\\
\\n score_param = 'accuracy'\\
\\n return score_param"},
'log_dataframe':{'name':'log_dataframe','code':f"\\n\\
\\ndef log_dataframe(df, msg=None):\\
\\n import io\\
\\n buffer = io.StringIO()\\
\\n df.info(buf=buffer)\\
\\n if msg:\\
\\n log_text = f'Data frame after {{msg}}:'\\
\\n else:\\
\\n log_text = 'Data frame:'\\
\\n log_text += '\\\\n\\\\t'+str(df.head(2)).replace('\\\\n','\\\\n\\\\t')\\
\\n log_text += ('\\\\n\\\\t' + buffer.getvalue().replace('\\\\n','\\\\n\\\\t'))\\
\\n get_logger().info(log_text)"},
'BayesSearchCV':{'name':'BayesSearchCV','imports':[{'mod':'cross_val_score','mod_from':'sklearn.model_selection'},{'mod':'fmin','mod_from':'hyperopt'},{'mod':'tpe','mod_from':'hyperopt'},{'mod':'hp','mod_from':'hyperopt'},{'mod':'STATUS_OK','mod_from':'hyperopt'},{'mod':'Trials','mod_from':'hyperopt'},{'mod':'numpy','mod_as':'np'}],'code':"\\n\\
\\nclass BayesSearchCV():\\
\\n\\
\\n def __init__(self, estimator, params, scoring, n_iter, cv):\\
\\n self.estimator = estimator\\
\\n self.params = params\\
\\n self.scoring = scoring\\
\\n self.iteration = n_iter\\
\\n self.cv = cv\\
\\n self.best_estimator_ = None\\
\\n self.best_score_ = None\\
\\n self.best_params_ = None\\
\\n\\
\\n def __min_fun(self, params):\\
\\n score=cross_val_score(self.estimator, self.X, self.y,scoring=self.scoring,cv=self.cv)\\
\\n acc = score.mean()\\
\\n return {'loss':-acc,'score': acc, 'status': STATUS_OK,'model' :self.estimator,'params': params}\\
\\n\\
\\n def fit(self, X, y):\\
\\n trials = Trials()\\
\\n self.X = X\\
\\n self.y = y\\
\\n best = fmin(self.__min_fun,self.params,algo=tpe.suggest, max_evals=self.iteration, trials=trials)\\
\\n result = sorted(trials.results, key = lambda x: x['loss'])[0]\\
\\n self.best_estimator_ = result['model']\\
\\n self.best_score_ = result['score']\\
\\n self.best_params_ = result['params']\\
\\n self.best_estimator_.fit(X, y)\\
\\n\\
\\n def hyperOptParamConversion( paramSpace):\\
\\n paramDict = {}\\
\\n for j in list(paramSpace.keys()):\\
\\n inp = paramSpace[j]\\
\\n isLog = False\\
\\n isLin = False\\
\\n isRan = False\\
\\n isList = False\\
\\n isString = False\\
\\n try:\\
\\n # check if functions are given as input and reassign paramspace\\
\\n v = paramSpace[j]\\
\\n if 'logspace' in paramSpace[j]:\\
\\n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\\
\\n isLog = True\\
\\n elif 'linspace' in paramSpace[j]:\\
\\n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\\
\\n isLin = True\\
\\n elif 'range' in paramSpace[j]:\\
\\n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\\
\\n isRan = True\\
\\n elif 'list' in paramSpace[j]:\\
\\n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\\
\\n isList = True\\
\\n elif '[' in paramSpace[j] and ']' in paramSpace[j]:\\
\\n paramSpace[j] = v.split('[')[1].split(']')[0].replace(' ', '')\\
\\n isList = True\\
\\n x = paramSpace[j].split(',')\\
\\n except:\\
\\n x = paramSpace[j]\\
\\n str_arg = paramSpace[j]\\
\\n\\
\\n # check if arguments are string\\
\\n try:\\
\\n test = eval(x[0])\\
\\n except:\\
\\n isString = True\\
\\n\\
\\n if isString:\\
\\n paramDict.update({j: hp.choice(j, x)})\\
\\n else:\\
\\n res = eval(str_arg)\\
\\n if isLin:\\
\\n y = eval('np.linspace' + str(res))\\
\\n paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))})\\
\\n elif isLog:\\
\\n y = eval('np.logspace' + str(res))\\
\\n paramDict.update(\\
\\n {j: hp.uniform(j, 10 ** eval(x[0]), 10 ** eval(x[1]))})\\
\\n elif isRan:\\
\\n y = eval('np.arange' + str(res))\\
\\n paramDict.update({j: hp.choice(j, y)})\\
\\n # check datatype of argument\\
\\n elif isinstance(eval(x[0]), bool):\\
\\n y = list(map(lambda i: eval(i), x))\\
\\n paramDict.update({j: hp.choice(j, eval(str(y)))})\\
\\n elif isinstance(eval(x[0]), float):\\
\\n res = eval(str_arg)\\
\\n if len(str_arg.split(',')) == 3 and not isList:\\
\\n y = eval('np.linspace' + str(res))\\
\\n #print(y)\\
\\n paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))})\\
\\n else:\\
\\n y = list(res) if isinstance(res, tuple) else [res]\\
\\n paramDict.update({j: hp.choice(j, y)})\\
\\n else:\\
\\n res = eval(str_arg)\\
\\n if len(str_arg.split(',')) == 3 and not isList:\\
\\n y = eval('np.linspace' +str(res)) if eval(x[2]) >= eval(x[1]) else eval('np.arange'+str(res))\\
\\n else:\\
\\n y = list(res) if isinstance(res, tuple) else [res]\\
\\n paramDict.update({j: hp.choice(j, y)})\\
\\n return paramDict"},
's2n':{'name':'s2n','imports':[{'mod':'word2number','mod_as':'w2n'},{'mod':'numpy','mod_as':'np'}],'code':"\\n\\
\\ndef s2n(value):\\
\\n try:\\
\\n x=eval(value)\\
\\n return x\\
\\n except:\\
\\n try:\\
\\n return w2n.word_to_num(value)\\
\\n except:\\
\\n return np.nan"},
'readWrite':{'name':'readWrite','imports':[{'mod':'json'},{'mod':'pandas','mod_as':'pd'}],'code':"\\n\\
\\ndef read_json(file_path):\\
\\n data = None\\
\\n with open(file_path,'r') as f:\\
\\n data = json.load(f)\\
\\n return data\\
\\n\\
\\ndef write_json(data, file_path):\\
\\n with open(file_path,'w') as f:\\
\\n json.dump(data, f)\\
\\n\\
\\ndef read_data(file_path, encoding='utf-8', sep=','):\\
\\n return pd.read_csv(file_path, encoding=encoding, sep=sep)\\
\\n\\
\\ndef write_data(data, file_path, index=False):\\
\\n return data.to_csv(file_path, index=index)\\
\\n\\
\\n#Uncomment and change below code for google storage\\
\\n#def write_data(data, file_path, index=False):\\
\\n# file_name= file_path.name\\
\\n# data.to_csv('output_data.csv')\\
\\n# storage_client = storage.Client()\\
\\n# bucket = storage_client.bucket('aion_data')\\
\\n# bucket.blob('prediction/'+file_name).upload_from_filename('output_data.csv', content_type='text/csv')\\
\\n# return data\\
\\n\\
\\ndef is_file_name_url(file_name):\\
\\n supported_urls_starts_with = ('gs://','https://','http://')\\
\\n return file_name.startswith(supported_urls_starts_with)\\
\\n"},
'logger':{'name':'set_logger','imports':[{'mod':'logging'}],'code':f"\\n\\
\\nlog = None\\
\\ndef set_logger(log_file, mode='a'):\\
\\n global log\\
\\n logging.basicConfig(filename=log_file, filemode=mode, format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S')\\
\\n log = logging.getLogger(Path(__file__).parent.name)\\
\\n return log\\
\\n\\
\\ndef get_logger():\\
\\n return log\\n"},
'mlflowSetPath':{'name':'mlflowSetPath','code':f"\\n\\ndef mlflowSetPath(path, name):\\
\\n{self.tab}db_name = str(Path(path)/'mlruns')\\
\\n{self.tab}mlflow.set_tracking_uri('file:///' + db_name)\\
\\n{self.tab}mlflow.set_experiment(str(Path(path).name))\\
\\n"},
'mlflow_create_experiment':{'name':'mlflow_create_experiment','code':f"\\n\\ndef mlflow_create_experiment(config, path, name):\\
\\n{self.tab}tracking_uri, artifact_uri, registry_uri = get_mlflow_uris(config, path)\\
\\n{self.tab}mlflow.tracking.set_tracking_uri(tracking_uri)\\
\\n{self.tab}mlflow.tracking.set_registry_uri(registry_uri)\\
\\n{self.tab}client = mlflow.tracking.MlflowClient()\\
\\n{self.tab}experiment = client.get_experiment_by_name(name)\\
\\n{self.tab}if experiment:\\
\\n{self.tab}{self.tab}experiment_id = experiment.experiment_id\\
\\n{self.tab}else:\\
\\n{self.tab}{self.tab}experiment_id = client.create_experiment(name, artifact_uri)\\
\\n{self.tab}return client, experiment_id\\
\\n"},
'get_mlflow_uris':{'name':'get_mlflow_uris','code':f"\\n\\ndef get_mlflow_uris(config, path):\\
\\n artifact_uri = None\\
\\n tracking_uri_type = config.get('tracking_uri_type',None)\\
\\n if tracking_uri_type == 'localDB':\\
\\n tracking_uri = 'sqlite:///' + str(path.resolve()/'mlruns.db')\\
\\n elif tracking_uri_type == 'server' and config.get('tracking_uri', None):\\
\\n tracking_uri = config['tracking_uri']\\
\\n if config.get('artifacts_uri', None):\\
\\n if Path(config['artifacts_uri']).exists():\\
\\n artifact_uri = 'file:' + config['artifacts_uri']\\
\\n else:\\
\\n artifact_uri = config['artifacts_uri']\\
\\n else:\\
\\n artifact_uri = 'file:' + str(path.resolve()/'mlruns')\\
\\n else:\\
\\n tracking_uri = 'file:' + str(path.resolve()/'mlruns')\\
\\n artifact_uri = None\\
\\n if config.get('registry_uri', None):\\
\\n registry_uri = config['registry_uri']\\
\\n else:\\
\\n registry_uri = 'sqlite:///' + str(path.resolve()/'registry.db')\\
\\n return tracking_uri, artifact_uri, registry_uri\\
\\n"},
'logMlflow':{'name':'logMlflow','code':f"\\n\\ndef logMlflow( params, metrices, estimator,tags={{}}, algoName=None):\\
\\n{self.tab}run_id = None\\
\\n{self.tab}for k,v in params.items():\\
\\n{self.tab}{self.tab}mlflow.log_param(k, v)\\
\\n{self.tab}for k,v in metrices.items():\\
\\n{self.tab}{self.tab}mlflow.log_metric(k, v)\\
\\n{self.tab}if 'CatBoost' in algoName:\\
\\n{self.tab}{self.tab}model_info = mlflow.catboost.log_model(estimator, 'model')\\
\\n{self.tab}else:\\
\\n{self.tab}{self.tab}model_info = mlflow.sklearn.log_model(sk_model=estimator, artifact_path='model')\\
\\n{self.tab}tags['processed'] = 'no'\\
\\n{self.tab}tags['registered'] = 'no'\\
\\n{self.tab}mlflow.set_tags(tags)\\
\\n{self.tab}if model_info:\\
\\n{self.tab}{self.tab}run_id = model_info.run_id\\
\\n{self.tab}return run_id\\
\\n"},
'classification_metrices':{'name':'classification_metrices','imports':[{'mod':'sklearn'},{'mod':'math'}],'code':"\\ndef get_classification_metrices( actual_values, predicted_values):\\
\\n result = {}\\
\\n accuracy_score = sklearn.metrics.accuracy_score(actual_values, predicted_values)\\
\\n avg_precision = sklearn.metrics.precision_score(actual_values, predicted_values,\\
\\n average='macro')\\
\\n avg_recall = sklearn.metrics.recall_score(actual_values, predicted_values,\\
\\n average='macro')\\
\\n avg_f1 = sklearn.metrics.f1_score(actual_values, predicted_values,\\
\\n average='macro')\\
\\n\\
\\n result['accuracy'] = math.floor(accuracy_score*10000)/100\\
\\n result['precision'] = math.floor(avg_precision*10000)/100\\
\\n result['recall'] = math.floor(avg_recall*10000)/100\\
\\n result['f1'] = math.floor(avg_f1*10000)/100\\
\\n return result\\
\\n"},
'regression_metrices':{'name':'regression_metrices','imports':[{'mod':'numpy', 'mod_as':'np'}],'code':"\\ndef get_regression_metrices( actual_values, predicted_values):\\
\\n result = {}\\
\\n\\
\\n me = np.mean(predicted_values - actual_values)\\
\\n sde = np.std(predicted_values - actual_values, ddof = 1)\\
\\n\\
\\n abs_err = np.abs(predicted_values - actual_values)\\
\\n mae = np.mean(abs_err)\\
\\n sdae = np.std(abs_err, ddof = 1)\\
\\n\\
\\n abs_perc_err = 100.*np.abs(predicted_values - actual_values) / actual_values\\
\\n mape = np.mean(abs_perc_err)\\
\\n sdape = np.std(abs_perc_err, ddof = 1)\\
\\n\\
\\n result['mean_error'] = me\\
\\n result['mean_abs_error'] = mae\\
\\n result['mean_abs_perc_error'] = mape\\
\\n result['error_std'] = sde\\
\\n result['abs_error_std'] = sdae\\
\\n result['abs_perc_error_std'] = sdape\\
\\n return result\\
\\n"}
}
def add_function(self, name, importer=None):
if name in self.available_functions.keys():
self.codeText += self.available_functions[name]['code']
if importer:
if 'imports' in self.available_functions[name].keys():
for module in self.available_functions[name]['imports']:
mod_name = module['mod']
mod_from = module.get('mod_from', None)
mod_as = module.get('mod_as', None)
importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as)
def get_function_name(self, name):
if name in self.available_functions.keys():
return self.available_functions[name]['name']
return None
def getCode(self):
return self.codeText
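# Illustrative usage (a minimal sketch mirroring the __main__ demo in
# utility.py; importModule comes from .imports, so this assumes package
# context and is shown here as comments only):
# importer = importModule()
# helper = global_function()
# helper.add_function('iqr', importer) # registers the snippet and its imports
# print(importer.getCode() + helper.getCode())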
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from importlib.metadata import version
import sys
class importModule():
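"""Tracks the modules required by generated code, renders grouped import statements (standard library, third party, local) and derives a pinned requirements list via importlib.metadata."""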
def __init__(self):
self.importModule = {}
self.stdlibModule = []
self.localModule = {}
def addLocalModule(self,module, mod_from=None, mod_as=None):
if module == '*':
if module not in self.localModule.keys():
self.localModule[module]= [mod_from]
else:
self.localModule[module].append(mod_from)
elif module not in self.localModule.keys():
self.localModule[module] = {'from':mod_from, 'as':mod_as}
def addModule(self, module, mod_from=None, mod_as=None):
if module not in self.importModule.keys():
self.importModule[module] = {'from':mod_from, 'as':mod_as}
if module in sys.stdlib_module_names:
self.stdlibModule.append(module)
elif isinstance(self.importModule[module], list):
if mod_as not in [x['as'] for x in self.importModule[module]]:
self.importModule[module].append({'from':mod_from, 'as':mod_as})
            elif mod_from not in [x['from'] for x in self.importModule[module]]:
self.importModule[module].append({'from':mod_from, 'as':mod_as})
elif mod_as != self.importModule[module]['as']:
as_list = [self.importModule[module]]
as_list.append({'from':mod_from, 'as':mod_as})
self.importModule[module] = as_list
elif mod_from != self.importModule[module]['from']:
as_list = [self.importModule[module]]
as_list.append({'from':mod_from, 'as':mod_as})
self.importModule[module] = as_list
def getModules(self):
return (self.importModule, self.stdlibModule)
def getBaseModule(self, extra_importers=[]):
modules_alias = { 'sklearn':'scikit-learn',
'genetic_selection':'sklearn-genetic',
'google': 'google-cloud-storage',
'azure':'azure-storage-file-datalake'}
local_modules = {'AIX':'/app/AIX-0.1-py3-none-any.whl'}
modules = []
require = ""
if extra_importers:
extra_importers = [importer.importModule for importer in extra_importers if isinstance(importer, importModule)]
importers_module = [self.importModule] + extra_importers
for importer_module in importers_module:
for k,v in importer_module.items():
if v['from']:
mod = v['from'].split('.')[0]
else:
mod = k
if mod in modules_alias.keys():
mod = modules_alias[mod]
modules.append(mod)
modules = list(set(modules))
for mod in modules:
try:
if mod in local_modules.keys():
require += f"{local_modules[mod]}\\n"
else:
require += f"{mod}=={version(mod)}\\n"
except :
if mod not in sys.stdlib_module_names:
raise
return require
def getCode(self):
def to_string(k, v):
mod = ''
if v['from']:
mod += 'from {} '.format(v['from'])
mod += 'import {}'.format(k)
if v['as']:
mod += ' as {} '.format(v['as'])
return mod
modules = ""
local_modules = ""
std_lib_modules = ""
third_party_modules = ""
for k,v in self.importModule.items():
if k in self.stdlibModule:
std_lib_modules = std_lib_modules + '\\n' + to_string(k, v)
elif isinstance(v, dict):
third_party_modules = third_party_modules + '\\n' + to_string(k, v)
elif isinstance(v, list):
for alias in v:
third_party_modules = third_party_modules + '\\n' + to_string(k, alias)
for k,v in self.localModule.items():
if k != '*':
local_modules = local_modules + '\\n' + to_string(k, v)
else:
for mod_from in v:
local_modules = local_modules + '\\n' + f'from {mod_from} import {k}'
if std_lib_modules:
modules = modules + "\\n#Standard Library modules" + std_lib_modules
if third_party_modules:
modules = modules + "\\n\\n#Third Party modules" + third_party_modules
if local_modules:
modules = modules + "\\n\\n#local modules" + local_modules + '\\n'
return modules
def copyCode(self, importer):
self.importModule, self.stdlibModule = importer.getModules()
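
# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# source). importModule collects imports, deduplicates aliases and renders
# them grouped into standard-library / third-party / local sections;
# getBaseModule() additionally emits a pinned requirements list.
def _example_import_module_usage():
    importer = importModule()
    importer.addModule('json')                      # stdlib, detected via sys.stdlib_module_names
    importer.addModule('pandas', mod_as='pd')       # rendered as 'import pandas as pd'
    importer.addModule('Path', mod_from='pathlib')  # rendered as 'from pathlib import Path'
    importer.addLocalModule('profiler', mod_from='dataProfiler')
    return importer.getCode()                       # grouped import block as a single string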
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class learner():
def __init__(self, problem_type="classification", target_feature="", sample_method=None,indent=0, tab_size=4):
self.tab = " "*tab_size
self.df_name = 'df'
self.problem_type = problem_type
self.target_feature = target_feature
self.search_space = []
self.codeText = f"\\ndef train(log):"
self.input_files = {}
self.output_files = {}
self.function_code = ''
self.addInputFiles({'inputData' : 'featureEngineeredData.dat','testData' : 'test.dat', 'metaData' : 'modelMetaData.json','monitor':'monitoring.json','log' : 'aion.log'})
def addInputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.input_files[k] = v
def addOutputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
            self.output_files[k] = v
def getInputFiles(self):
text = 'IOFiles = '
if not self.input_files:
text += '{ }'
else:
text += json.dumps(self.input_files, indent=4)
return text
def getOutputFiles(self):
text = 'output_file = '
if not self.output_files:
text += '{ }'
else:
text += json.dumps(self.output_files, indent=4)
return text
def getInputOutputFiles(self, indent=0):
text = '\\n'
text += self.getInputFiles()
if indent:
text = text.replace('\\n', self.tab * indent + '\\n')
return text
def __addValidateConfigCode(self):
text = "\\n\\
\\ndef validateConfig():\\
\\n config_file = Path(__file__).parent/'config.json'\\
\\n if not Path(config_file).exists():\\
\\n raise ValueError(f'Config file is missing: {config_file}')\\
\\n config = read_json(config_file)\\
\\n return config"
return text
def __addSaveModelCode(self):
text = "\\n\\
\\ndef save_model( experiment_id, estimator, features, metrices, params,tags, scoring):\\
\\n # mlflow log model, metrices and parameters\\
\\n with mlflow.start_run(experiment_id = experiment_id, run_name = model_name):\\
\\n return logMlflow(params, metrices, estimator, tags, model_name.split('_')[0])"
return text
def addStatement(self, statement, indent=1):
self.codeText += '\\n' + self.tab * indent + statement
def getCode(self):
return self.function_code + '\\n' + self.codeText
def addLocalFunctionsCode(self):
self.function_code += self.__addValidateConfigCode()
self.function_code += self.__addSaveModelCode()
def getPrefixModules(self):
modules = [{'module':'Path', 'mod_from':'pathlib'}
,{'module':'pandas', 'mod_as':'pd'}
]
return modules
def addPrefixCode(self, indent=1):
self.codeText += "\\
\\n config = validateConfig()\\
\\n targetPath = Path('aion')/config['targetPath']\\
\\n if not targetPath.exists():\\
\\n raise ValueError(f'targetPath does not exist')\\
\\n meta_data_file = targetPath/IOFiles['metaData']\\
\\n if meta_data_file.exists():\\
\\n meta_data = read_json(meta_data_file)\\
\\n else:\\
\\n raise ValueError(f'Configuration file not found: {meta_data_file}')\\
\\n log_file = targetPath/IOFiles['log']\\
\\n log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)\\
\\n dataLoc = targetPath/IOFiles['inputData']\\
\\n if not dataLoc.exists():\\
\\n        return {'Status':'Failure','Message':'Data location does not exist.'}\\
\\n\\
\\n status = dict()\\
\\n usecase = config['targetPath']\\
\\n df = pd.read_csv(dataLoc)\\
\\n prev_step_output = meta_data['featureengineering']['Status']"
def getSuffixModules(self):
modules = [{'module':'platform'}
,{'module':'time'}
,{'module':'mlflow'}
]
return modules
def add_100_trainsize_code(self):
self.codeText +="\\n\\
\\n else:\\
\\n test_score = train_score\\
\\n metrices = {}"
def addSuffixCode(self, indent=1):
self.codeText += "\\n\\
\\n meta_data['training'] = {}\\
\\n meta_data['training']['features'] = features\\
\\n scoring = config['scoring_criteria']\\
\\n tags = {'estimator_name': model_name}\\
\\n monitoring_data = read_json(targetPath/IOFiles['monitor'])\\
\\n mlflow_default_config = {'artifacts_uri':'','tracking_uri_type':'','tracking_uri':'','registry_uri':''}\\
\\n mlflow_client, experiment_id = mlflow_create_experiment(monitoring_data.get('mlflow_config',mlflow_default_config), targetPath, usecase)\\
\\n run_id = save_model(experiment_id, estimator,features, metrices,best_params,tags,scoring)\\
\\n write_json(meta_data, targetPath/IOFiles['metaDataOutput'])\\
\\n write_json({'scoring_criteria': scoring, 'metrices':metrices, 'param':best_params}, targetPath/IOFiles['performance'])\\
\\n\\
\\n # return status\\
\\n    status = {'Status':'Success','mlflow_run_id':run_id,'FeaturesUsed':features,'test_score':metrices['test_score'],'train_score':metrices['train_score']}\\
\\n log.info(f'Test score: {test_score}')\\
\\n log.info(f'Train score: {train_score}')\\
\\n log.info(f'MLflow run id: {run_id}')\\
\\n log.info(f'output: {status}')\\
\\n return json.dumps(status)"
def getMainCodeModules(self):
modules = [{'module':'Path', 'mod_from':'pathlib'}
,{'module':'sys'}
,{'module':'json'}
,{'module':'logging'}
]
return modules
def addMainCode(self, indent=1):
self.codeText += "\\n\\
\\nif __name__ == '__main__':\\
\\n log = None\\
\\n try:\\
\\n print(train(log))\\
\\n except Exception as e:\\
\\n if log:\\
\\n log.error(e, exc_info=True)\\
\\n status = {'Status':'Failure','Message':str(e)}\\
\\n print(json.dumps(status))\\
"
def add_variable(self, name, value, indent=1):
if isinstance(value, str):
self.codeText += f"\\n{self.tab * indent}{name} = '{value}'"
else:
self.codeText += f"\\n{self.tab * indent}{name} = {value}"
def addStatement(self, statement, indent=1):
self.codeText += f"\\n{self.tab * indent}{statement}"
    def add_search_space_w(self, algorithms):
        for model, params in algorithms.items():
d = {'clf': f"[{model}()]"}
for k,v in params.items():
if isinstance(v, str):
d[f'clf__{k}']=f"'{v}'"
else:
d[f'clf__{k}']= f"{v}"
self.search_space.append(d)
def add_search_space(self, indent=1):
self.codeText += f"\\n{self.tab}search_space = config['search_space']"
def add_train_test_split(self, train_feature, target_feature,test_ratio, indent=1):
self.codeText += "\\n\\n # split the data for training\\
\\n selected_features = prev_step_output['selected_features']\\
\\n target_feature = config['target_feature']\\
\\n train_features = prev_step_output['total_features'].copy()\\
\\n train_features.remove(target_feature)\\
\\n X_train = df[train_features]\\
\\n y_train = df[target_feature]\\
\\n if config['test_ratio'] > 0.0:\\
\\n test_data = read_data(targetPath/IOFiles['testData'])\\
\\n X_test = test_data[train_features]\\
\\n y_test = test_data[target_feature]\\
\\n else:\\
\\n X_test = pd.DataFrame()\\
\\n y_test = pd.DataFrame()"
def add_model_fit(self, estimator, optimizer, selector_method, importer, indent=1):
# need to adjust the indent
importer.addModule('importlib')
importer.addModule('operator')
text = f"\\n features = selected_features['{selector_method}']\\
\\n estimator = {estimator}()\\
\\n param = config['algorithms']['{estimator}']"
if optimizer == 'GridSearchCV':
text += "\\n grid = GridSearchCV(estimator, param,cv=config['optimization_param']['trainTestCVSplit'])\\
\\n grid.fit(X_train[features], y_train)\\
\\n train_score = grid.best_score_ * 100\\
\\n best_params = grid.best_params_\\
\\n estimator = grid.best_estimator_"
elif optimizer == 'GeneticSelectionCV':
text += "\\n grid = GeneticSelectionCV(estimator, scoring=scorer, n_generations=config['optimization_param']['iterations'],cv=config['optimization_param']['trainTestCVSplit'],n_population=config['optimization_param']['geneticparams']['n_population'],crossover_proba=config['optimization_param']['geneticparams']['crossover_proba'],mutation_proba=config['optimization_param']['geneticparams']['mutation_proba'],crossover_independent_proba=config['optimization_param']['geneticparams']['crossover_independent_proba'],mutation_independent_proba=config['optimization_param']['geneticparams']['mutation_independent_proba'],tournament_size=config['optimization_param']['geneticparams']['tournament_size'],n_gen_no_change=config['optimization_param']['geneticparams']['n_gen_no_change'])\\
\\n grid.fit(X_train[features], y_train)\\
\\n train_score = grid.score(X_train[features], y_train)\\
\\n best_params = grid.estimator_.get_params()\\
\\n estimator = grid.estimator_"
else:
text += f"\\n grid = {optimizer}(estimator, param, scoring=scorer, n_iter=config['optimization_param']['iterations'],cv=config['optimization_param']['trainTestCVSplit'])\\
\\n grid.fit(X_train[features], y_train)\\
\\n train_score = grid.best_score_ * 100\\
\\n best_params = grid.best_params_\\
\\n estimator = grid.best_estimator_"
self.codeText += text
def addLearner(self, model_name, params, importer, indent=1):
importer.addModule('Pipeline', mod_from='sklearn.pipeline')
importer.addModule('ColumnTransformer', mod_from='sklearn.compose')
importer.addModule('confusion_matrix', mod_from='sklearn.metrics')
model_params = []
for k,v in params.items():
if isinstance(v, str):
model_params.append(f"{k}='{v}'")
else:
model_params.append(f"{k}={v}")
model_params = ",".join(model_params)
self.codeText += self.getTransformer()
text = f"\\n{self.tab * indent}pipeline = Pipeline(steps = [('preprocessor', preprocessor),('learner',{model_name}({model_params}))])"
self.codeText += text
self.codeText += self.splitTargetFeature(importer)
if self.balancing:
self.codeText += self.balancingCode(importer)
self.codeText += self.fitModelCode(importer)
def splitTargetFeature(self, importer, indent=1):
importer.addModule('train_test_split', mod_from='sklearn.model_selection')
return f"\\n{self.tab * indent}target = df['{self.target_feature}']\\
\\n{self.tab * indent}df = df.drop(['{self.target_feature}'], axis=1)\\
\\n{self.tab * indent}X_train, X_test, y_train, y_test = train_test_split(df,target, train_size = percentage/100.0)"
def getCode_remove(self, model_name=None, indent=1):
return self.codeText
def getDFName(self):
return self.df_name
def copyCode(self, learner):
self.codeText = learner.getCode()
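
# ---------------------------------------------------------------------------
# Illustrative driver sketch (added for clarity; not part of the original
# source). The call order mirrors how the run_* drivers in this package
# stitch a pipeline step together; the argument values are hypothetical.
def _example_learner_usage():
    importer = importModule()
    gen = learner(problem_type='classification', target_feature='label')
    for mod in gen.getPrefixModules() + gen.getSuffixModules() + gen.getMainCodeModules():
        importer.addModule(mod['module'], mod_from=mod.get('mod_from'), mod_as=mod.get('mod_as'))
    gen.addLocalFunctionsCode()                # validateConfig() / save_model() helpers
    gen.addPrefixCode()                        # config, metadata and data loading preamble
    gen.add_train_test_split(None, None, None) # arguments unused; generated text reads config at runtime
    gen.add_model_fit('LogisticRegression', 'GridSearchCV', 'statisticalBased', importer)
    gen.addSuffixCode()
    gen.addMainCode()
    return importer.getCode() + gen.getCode()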
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
class input_drift():
def __init__(self, tab_size=4):
self.tab = ' ' * tab_size
self.codeText = ''
def addInputDriftClass(self):
text = "\\
\\nclass inputdrift():\\
\\n\\
\\n def __init__(self,base_config):\\
\\n self.usecase = base_config['modelName'] + '_' + base_config['modelVersion']\\
\\n self.currentDataLocation = base_config['currentDataLocation']\\
\\n home = Path.home()\\
\\n if platform.system() == 'Windows':\\
\\n from pathlib import WindowsPath\\
\\n output_data_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'Data'\\
\\n output_model_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'target'/self.usecase\\
\\n else:\\
\\n from pathlib import PosixPath\\
\\n output_data_dir = PosixPath(home)/'HCLT'/'AION'/'Data'\\
\\n output_model_dir = PosixPath(home)/'HCLT'/'AION'/'target'/self.usecase\\
\\n if not output_model_dir.exists():\\
\\n raise ValueError(f'Configuration file not found at {output_model_dir}')\\
\\n\\
\\n tracking_uri = 'file:///' + str(Path(output_model_dir)/'mlruns')\\
\\n registry_uri = 'sqlite:///' + str(Path(output_model_dir)/'mlruns.db')\\
\\n mlflow.set_tracking_uri(tracking_uri)\\
\\n mlflow.set_registry_uri(registry_uri)\\
\\n client = mlflow.tracking.MlflowClient(\\
\\n tracking_uri=tracking_uri,\\
\\n registry_uri=registry_uri,\\
\\n )\\
\\n model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)\\
\\n model = mlflow.pyfunc.load_model(model_version_uri)\\
\\n run = client.get_run(model.metadata.run_id)\\
\\n if run.info.artifact_uri.startswith('file:'):\\
\\n artifact_path = Path(run.info.artifact_uri[len('file:///') : ])\\
\\n else:\\
\\n artifact_path = Path(run.info.artifact_uri)\\
\\n self.trainingDataPath = artifact_path/(self.usecase + '_data.csv')\\
\\n\\
\\n def get_input_drift(self,current_data, historical_data):\\
\\n curr_num_feat = current_data.select_dtypes(include='number')\\
\\n hist_num_feat = historical_data.select_dtypes(include='number')\\
\\n num_features = [feat for feat in historical_data.columns if feat in curr_num_feat]\\
\\n alert_count = 0\\
\\n data = {\\
\\n 'current':{'data':current_data},\\
\\n 'hist': {'data': historical_data}\\
\\n }\\
\\n dist_changed_columns = []\\
\\n dist_change_message = []\\
\\n for feature in num_features:\\
\\n curr_static_value = st.ks_2samp( hist_num_feat[feature], curr_num_feat[feature]).pvalue\\
\\n if (curr_static_value < 0.05):\\
\\n distribution = {}\\
\\n distribution['hist'] = self.DistributionFinder( historical_data[feature])\\
\\n distribution['curr'] = self.DistributionFinder( current_data[feature])\\
\\n if(distribution['hist']['name'] == distribution['curr']['name']):\\
\\n pass\\
\\n else:\\
\\n alert_count = alert_count + 1\\
\\n dist_changed_columns.append(feature)\\
\\n changed_column = {}\\
\\n changed_column['Feature'] = feature\\
\\n changed_column['KS_Training'] = curr_static_value\\
\\n changed_column['Training_Distribution'] = distribution['hist']['name']\\
\\n changed_column['New_Distribution'] = distribution['curr']['name']\\
\\n dist_change_message.append(changed_column)\\
\\n if alert_count:\\
\\n resultStatus = dist_change_message\\
\\n else :\\
\\n resultStatus='Model is working as expected'\\
\\n return(alert_count, resultStatus)\\
\\n\\
\\n def DistributionFinder(self,data):\\
\\n best_distribution =''\\
\\n best_sse =0.0\\
\\n if(data.dtype in ['int','int64']):\\
\\n distributions= {'bernoulli':{'algo':st.bernoulli},\\
\\n 'binom':{'algo':st.binom},\\
\\n 'geom':{'algo':st.geom},\\
\\n 'nbinom':{'algo':st.nbinom},\\
\\n 'poisson':{'algo':st.poisson}\\
\\n }\\
\\n index, counts = np.unique(data.astype(int),return_counts=True)\\
\\n if(len(index)>=2):\\
\\n best_sse = np.inf\\
\\n y1=[]\\
\\n total=sum(counts)\\
\\n mean=float(sum(index*counts))/total\\
\\n variance=float((sum(index**2*counts) -total*mean**2))/(total-1)\\
\\n dispersion=mean/float(variance)\\
\\n theta=1/float(dispersion)\\
\\n                r=mean*(float(theta)/(1-theta))\\
\\n\\
\\n for j in counts:\\
\\n y1.append(float(j)/total)\\
\\n distributions['bernoulli']['pmf'] = distributions['bernoulli']['algo'].pmf(index,mean)\\
\\n distributions['binom']['pmf'] = distributions['binom']['algo'].pmf(index,len(index),p=mean/len(index))\\
\\n distributions['geom']['pmf'] = distributions['geom']['algo'].pmf(index,1/float(1+mean))\\
\\n distributions['nbinom']['pmf'] = distributions['nbinom']['algo'].pmf(index,mean,r)\\
\\n distributions['poisson']['pmf'] = distributions['poisson']['algo'].pmf(index,mean)\\
\\n\\
\\n sselist = []\\
\\n for dist in distributions.keys():\\
\\n distributions[dist]['sess'] = np.sum(np.power(y1 - distributions[dist]['pmf'], 2.0))\\
\\n                    if np.isnan(distributions[dist]['sess']):\\
\\n distributions[dist]['sess'] = float('inf')\\
\\n best_dist = min(distributions, key=lambda v: distributions[v]['sess'])\\
\\n best_distribution = best_dist\\
\\n best_sse = distributions[best_dist]['sess']\\
\\n\\
\\n elif (len(index) == 1):\\
\\n best_distribution = 'Constant Data-No Distribution'\\
\\n best_sse = 0.0\\
\\n elif(data.dtype in ['float64','float32']):\\
\\n distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta]\\
\\n best_distribution = st.norm.name\\
\\n best_sse = np.inf\\
\\n nrange = data.max() - data.min()\\
\\n\\
\\n y, x = np.histogram(data.astype(float), bins='auto', density=True)\\
\\n x = (x + np.roll(x, -1))[:-1] / 2.0\\
\\n\\
\\n for distribution in distributions:\\
\\n with warnings.catch_warnings():\\
\\n warnings.filterwarnings('ignore')\\
\\n params = distribution.fit(data.astype(float))\\
\\n arg = params[:-2]\\
\\n loc = params[-2]\\
\\n scale = params[-1]\\
\\n pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)\\
\\n sse = np.sum(np.power(y - pdf, 2.0))\\
\\n if( sse < best_sse):\\
\\n best_distribution = distribution.name\\
\\n best_sse = sse\\
\\n\\
\\n return {'name':best_distribution, 'sse': best_sse}\\
\\n\\
"
return text
def addSuffixCode(self, indent=1):
text ="\\n\\
\\ndef check_drift( config):\\
\\n inputdriftObj = inputdrift(config)\\
\\n historicaldataFrame=pd.read_csv(inputdriftObj.trainingDataPath)\\
\\n currentdataFrame=pd.read_csv(inputdriftObj.currentDataLocation)\\
\\n dataalertcount,message = inputdriftObj.get_input_drift(currentdataFrame,historicaldataFrame)\\
\\n if message == 'Model is working as expected':\\
\\n output_json = {'status':'SUCCESS','data':{'Message':'Model is working as expected'}}\\
\\n else:\\
\\n output_json = {'status':'SUCCESS','data':{'Affected Columns':message}}\\
\\n return(output_json)\\
\\n\\
\\nif __name__ == '__main__':\\
\\n try:\\
\\n if len(sys.argv) < 2:\\
\\n raise ValueError('config file not present')\\
\\n config = sys.argv[1]\\
\\n if Path(config).is_file() and Path(config).suffix == '.json':\\
\\n with open(config, 'r') as f:\\
\\n config = json.load(f)\\
\\n else:\\
\\n config = json.loads(config)\\
\\n output = check_drift(config)\\
\\n status = {'Status':'Success','Message':output}\\
\\n print('input_drift:'+json.dumps(status))\\
\\n except Exception as e:\\
\\n status = {'Status':'Failure','Message':str(e)}\\
\\n print('input_drift:'+json.dumps(status))"
return text
def addStatement(self, statement, indent=1):
self.codeText += '\\n' + self.tab * indent + statement
def generateCode(self):
self.codeText += self.addInputDriftClass()
self.codeText += self.addSuffixCode()
def getCode(self):
return self.codeText
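
# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# source): rendering the standalone input-drift script as a string.
def _example_input_drift_render():
    gen = input_drift()
    gen.generateCode()      # appends the inputdrift class plus the __main__ wrapper
    return gen.getCode()    # complete source text, ready to be written to a .py file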
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class tabularDataReader():
def __init__(self, tab_size=4):
self.tab = ' ' * tab_size
self.function_code = ''
self.codeText = ''
self.code_generated = False
def getInputFiles(self):
IOFiles = {
"rawData": "rawData.dat",
"metaData" : "modelMetaData.json",
"log" : "aion.log",
"outputData" : "rawData.dat",
"monitoring":"monitoring.json",
"prodData": "prodData",
"prodDataGT":"prodDataGT"
}
text = 'IOFiles = '
if not IOFiles:
text += '{ }'
else:
text += json.dumps(IOFiles, indent=4)
return text
def getOutputFiles(self):
output_files = {
'metaData' : 'modelMetaData.json',
'log' : 'aion.log',
'outputData' : 'rawData.dat'
}
text = 'output_file = '
if not output_files:
text += '{ }'
else:
text += json.dumps(output_files, indent=4)
return text
def getInputOutputFiles(self, indent=0):
text = '\\n'
text += self.getInputFiles()
if indent:
text = text.replace('\\n', self.tab * indent + '\\n')
return text
def __addValidateConfigCode(self):
text = "\\n\\
\\ndef validateConfig():\\
\\n config_file = Path(__file__).parent/'config.json'\\
\\n if not Path(config_file).exists():\\
\\n raise ValueError(f'Config file is missing: {config_file}')\\
\\n config = read_json(config_file)\\
\\n if not config['targetPath']:\\
\\n raise ValueError(f'Target Path is not configured')\\
\\n return config"
return text
def addMainCode(self):
self.codeText += "\\n\\
\\nif __name__ == '__main__':\\
\\n log = None\\
\\n try:\\
\\n print(load_data(log))\\
\\n except Exception as e:\\
\\n if log:\\
\\n            log.error(e, exc_info=True)\\
\\n status = {'Status':'Failure','Message':str(e)}\\
\\n print(json.dumps(status))\\
\\n raise Exception(str(e))\\
"
def addLoadDataCode(self):
self.codeText += """
#This function will read the data and save the data on persistent storage
def load_data(log):
config = validateConfig()
targetPath = Path('aion')/config['targetPath']
targetPath.mkdir(parents=True, exist_ok=True)
log_file = targetPath/IOFiles['log']
log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)
monitoring = targetPath/IOFiles['monitoring']
if monitoring.exists():
monitoringStatus = read_json(monitoring)
if monitoringStatus['dataLocation'] == '' and monitoringStatus['driftStatus'] != 'No Drift':
            reader = dataReader(reader_type=monitoringStatus.get('prod_db_type','sqlite'),target_path=targetPath, config=config.get('db_config',None))
raw_data_location = targetPath/IOFiles['rawData']
if reader.file_exists(IOFiles['prodData']) and reader.file_exists(IOFiles['prodDataGT']):
predicted_data = reader.read(IOFiles['prodData'])
actual_data = reader.read(IOFiles['prodDataGT'])
common_col = [k for k in predicted_data.columns.tolist() if k in actual_data.columns.tolist()]
mergedRes = pd.merge(actual_data, predicted_data, on =common_col,how = 'inner')
raw_data_path = pd.read_csv(raw_data_location)
df = pd.concat([raw_data_path,mergedRes])
else:
raise ValueError(f'Prod Data not found')
elif monitoringStatus['dataLocation'] == '':
raise ValueError(f'Data Location does not exist')
else:
if 's3' in monitoringStatus.keys():
input_reader = dataReader(reader_type='s3',target_path=None, config=monitoringStatus['s3'])
log.info(f"Downloading '{monitoringStatus['s3']['file_name']}' from s3 bucket '{monitoringStatus['s3']['bucket_name']}'")
df = input_reader.read(monitoringStatus['s3']['file_name'])
else:
location = monitoringStatus['dataLocation']
log.info(f'Dataset path: {location}')
df = read_data(location)
else:
raise ValueError(f'Monitoring.json does not exist')
status = {}
output_data_path = targetPath/IOFiles['outputData']
log.log_dataframe(df)
required_features = list(set(config['selected_features'] + [config['target_feature']]))
log.info('Dataset features required: ' + ','.join(required_features))
missing_features = [x for x in required_features if x not in df.columns.tolist()]
if missing_features:
raise ValueError(f'Some feature/s is/are missing: {missing_features}')
log.info('Removing unused features: '+','.join(list(set(df.columns) - set(required_features))))
df = df[required_features]
log.info(f'Required features: {required_features}')
try:
log.info(f'Saving Dataset: {str(output_data_path)}')
write_data(df, output_data_path, index=False)
status = {'Status':'Success','DataFilePath':IOFiles['outputData'],'Records':len(df)}
except:
raise ValueError('Unable to create data file')
meta_data_file = targetPath/IOFiles['metaData']
meta_data = dict()
meta_data['load_data'] = {}
meta_data['load_data']['selected_features'] = [x for x in config['selected_features'] if x != config['target_feature']]
meta_data['load_data']['Status'] = status
write_json(meta_data, meta_data_file)
output = json.dumps(status)
log.info(output)
return output
"""
def addValidateConfigCode(self, indent=1):
self.function_code += self.__addValidateConfigCode()
def generateCode(self):
self.addValidateConfigCode()
self.addLoadDataCode()
self.addMainCode()
self.code_generated = True
def addStatement(self, statement, indent=1):
self.codeText += '\\n' + self.tab * indent + statement
def getCode(self):
if not self.code_generated:
self.generateCode()
return self.function_code + '\\n' + self.codeText
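
# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# source). getCode() triggers generateCode() lazily, so a caller only needs:
def _example_tabular_reader_render():
    gen = tabularDataReader()
    return gen.getInputOutputFiles() + gen.getCode()   # IOFiles dict plus load_data() source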
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class drift():
def __init__(self, tab_size=4):
self.tab = ' ' * tab_size
self.codeText = ''
def getInputFiles(self):
IOFiles = {
"log": "aion.log",
"trainingData":"rawData.dat",
"production": "production.json",
"monitoring":"monitoring.json",
"prodData": "prodData",
"prodDataGT":"prodDataGT"
}
text = 'IOFiles = '
if not IOFiles:
text += '{ }'
else:
text += json.dumps(IOFiles, indent=4)
return text
def getInputOutputFiles(self, indent=0):
text = '\\n'
text += self.getInputFiles()
if indent:
text = text.replace('\\n', self.tab * indent + '\\n')
return text
def addStatement(self, statement, indent=1):
self.codeText += '\\n' + self.tab * indent + statement
def getCode(self):
return self.codeText
# temporary code
def get_input_drift_import_modules(self):
return [
{'module': 'sys', 'mod_from': None, 'mod_as': None},
{'module': 'json', 'mod_from': None, 'mod_as': None},
{'module': 'mlflow', 'mod_from': None, 'mod_as': None},
{'module': 'stats', 'mod_from': 'scipy', 'mod_as': 'st'},
{'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'warnings', 'mod_from': None, 'mod_as': None},
{'module': 'platform', 'mod_from': None, 'mod_as': None }
]
def get_input_drift_code(self):
return """
class inputdrift():
def __init__(self,base_config):
if 'mlflowURL' in base_config:
self.usecase = base_config['modelName'] + '_' + base_config['modelVersion']
self.currentDataLocation = base_config['currentDataLocation']
home = Path.home()
if platform.system() == 'Windows':
from pathlib import WindowsPath
output_data_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'Data'
output_model_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'target'/self.usecase
else:
from pathlib import PosixPath
output_data_dir = PosixPath(home)/'HCLT'/'AION'/'Data'
                output_model_dir = PosixPath(home)/'HCLT'/'AION'/'target'/self.usecase
if not output_model_dir.exists():
raise ValueError(f'Configuration file not found at {output_model_dir}')
tracking_uri = 'file:///' + str(Path(output_model_dir)/'mlruns')
registry_uri = 'sqlite:///' + str(Path(output_model_dir)/'mlruns.db')
mlflow.set_tracking_uri(tracking_uri)
mlflow.set_registry_uri(registry_uri)
client = mlflow.tracking.MlflowClient(
tracking_uri=tracking_uri,
registry_uri=registry_uri,
)
model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)
model = mlflow.pyfunc.load_model(model_version_uri)
run = client.get_run(model.metadata.run_id)
if run.info.artifact_uri.startswith('file:'):
artifact_path = Path(run.info.artifact_uri[len('file:///') : ])
else:
artifact_path = Path(run.info.artifact_uri)
self.trainingDataPath = artifact_path/(self.usecase + '_data.csv')
def get_input_drift(self,current_data, historical_data):
curr_num_feat = current_data.select_dtypes(include='number')
hist_num_feat = historical_data.select_dtypes(include='number')
num_features = [feat for feat in historical_data.columns if feat in curr_num_feat]
alert_count = 0
data = {
'current':{'data':current_data},
'hist': {'data': historical_data}
}
dist_changed_columns = []
dist_change_message = []
for feature in num_features:
curr_static_value = round(st.ks_2samp( hist_num_feat[feature], curr_num_feat[feature]).pvalue,3)
if (curr_static_value < 0.05):
try:
distribution = {}
distribution['hist'] = self.DistributionFinder( historical_data[feature])
distribution['curr'] = self.DistributionFinder( current_data[feature])
if(distribution['hist']['name'] == distribution['curr']['name']):
pass
else:
alert_count = alert_count + 1
dist_changed_columns.append(feature)
changed_column = {}
changed_column['Feature'] = feature
changed_column['KS_Training'] = curr_static_value
changed_column['Training_Distribution'] = distribution['hist']['name']
changed_column['New_Distribution'] = distribution['curr']['name']
dist_change_message.append(changed_column)
except:
pass
if alert_count:
resultStatus = dist_change_message
else :
resultStatus='Model is working as expected'
return(alert_count, resultStatus)
def DistributionFinder(self,data):
best_distribution =''
best_sse =0.0
if(data.dtype in ['int','int64']):
distributions= {'bernoulli':{'algo':st.bernoulli},
'binom':{'algo':st.binom},
'geom':{'algo':st.geom},
'nbinom':{'algo':st.nbinom},
'poisson':{'algo':st.poisson}
}
index, counts = np.unique(data.astype(int),return_counts=True)
if(len(index)>=2):
best_sse = np.inf
y1=[]
total=sum(counts)
mean=float(sum(index*counts))/total
variance=float((sum(index**2*counts) -total*mean**2))/(total-1)
dispersion=mean/float(variance)
theta=1/float(dispersion)
                r=mean*(float(theta)/(1-theta))
for j in counts:
y1.append(float(j)/total)
distributions['bernoulli']['pmf'] = distributions['bernoulli']['algo'].pmf(index,mean)
distributions['binom']['pmf'] = distributions['binom']['algo'].pmf(index,len(index),p=mean/len(index))
distributions['geom']['pmf'] = distributions['geom']['algo'].pmf(index,1/float(1+mean))
distributions['nbinom']['pmf'] = distributions['nbinom']['algo'].pmf(index,mean,r)
distributions['poisson']['pmf'] = distributions['poisson']['algo'].pmf(index,mean)
sselist = []
for dist in distributions.keys():
distributions[dist]['sess'] = np.sum(np.power(y1 - distributions[dist]['pmf'], 2.0))
if np.isnan(distributions[dist]['sess']):
distributions[dist]['sess'] = float('inf')
best_dist = min(distributions, key=lambda v: distributions[v]['sess'])
best_distribution = best_dist
best_sse = distributions[best_dist]['sess']
elif (len(index) == 1):
best_distribution = 'Constant Data-No Distribution'
best_sse = 0.0
elif(data.dtype in ['float64','float32']):
distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta]
best_distribution = st.norm.name
best_sse = np.inf
nrange = data.max() - data.min()
y, x = np.histogram(data.astype(float), bins='auto', density=True)
x = (x + np.roll(x, -1))[:-1] / 2.0
for distribution in distributions:
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
params = distribution.fit(data.astype(float))
arg = params[:-2]
loc = params[-2]
scale = params[-1]
pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
sse = np.sum(np.power(y - pdf, 2.0))
if( sse < best_sse):
best_distribution = distribution.name
best_sse = sse
return {'name':best_distribution, 'sse': best_sse}
def check_drift( config):
inputdriftObj = inputdrift(config)
historicaldataFrame=pd.read_csv(inputdriftObj.trainingDataPath,skipinitialspace = True,na_values=['-','?'])
currentdataFrame=pd.read_csv(inputdriftObj.currentDataLocation,skipinitialspace = True,na_values=['-','?'])
historicaldataFrame.columns = historicaldataFrame.columns.str.strip()
currentdataFrame.columns = currentdataFrame.columns.str.strip()
dataalertcount,message = inputdriftObj.get_input_drift(currentdataFrame,historicaldataFrame)
if message == 'Model is working as expected':
output_json = {'status':'SUCCESS','data':{'Message':'Model is working as expected'}}
else:
output_json = {'status':'SUCCESS','data':{'Affected Columns':message}}
return(output_json)
"""
def get_main_drift_code(self, problem_type, smaller_is_better=True):
text = ''
if problem_type == 'classification':
text += """
def is_drift_within_limits(production, current_matrices,scoring_criteria,threshold = 5):
testscore = production['score']
current_score = current_matrices[scoring_criteria]
threshold_value = testscore * threshold / 100.0
if current_score > (testscore - threshold_value) :
return True
else:
return False
def get_metrices(actual_values, predicted_values):
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
result = {}
accuracy_score = accuracy_score(actual_values, predicted_values)
avg_precision = precision_score(actual_values, predicted_values,
average='macro')
avg_recall = recall_score(actual_values, predicted_values,
average='macro')
avg_f1 = f1_score(actual_values, predicted_values,
average='macro')
result['accuracy'] = round((accuracy_score*100),2)
result['precision'] = round((avg_precision*100),2)
result['recall'] = round((avg_recall*100),2)
result['f1'] = round((avg_f1*100),2)
return result
"""
else:
text += """
def is_drift_within_limits(production, current_matrices,scoring_criteria,threshold = 5):
testscore = production['score']
current_score = current_matrices[scoring_criteria]
threshold_value = testscore * threshold / 100.0
"""
if smaller_is_better:
text += """
if current_score < (testscore + threshold_value) :"""
else:
text += """
if current_score > (testscore - threshold_value) :"""
text += """
return True
else:
return False
def get_metrices(actual_values, predicted_values):
import numpy as np
result = {}
me = np.mean(predicted_values - actual_values)
sde = np.std(predicted_values - actual_values, ddof = 1)
abs_err = np.abs(predicted_values - actual_values)
mae = np.mean(abs_err)
sdae = np.std(abs_err, ddof = 1)
abs_perc_err = 100.0 * np.abs(predicted_values - actual_values) / actual_values
mape = np.mean(abs_perc_err)
sdape = np.std(abs_perc_err, ddof = 1)
result['mean_error'] = me
result['mean_abs_error'] = mae
result['mean_abs_perc_error'] = mape
result['error_std'] = sde
result['abs_error_std'] = sdae
result['abs_perc_error_std'] = sdape
return result
"""
text += """
def monitoring(config, log=None):
targetPath = Path('aion')/config['targetPath']
targetPath.mkdir(parents=True, exist_ok=True)
log_file = targetPath/IOFiles['log']
log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)
output_json = {}
trainingDataLocation = targetPath/IOFiles['trainingData']
monitoring = targetPath/IOFiles['monitoring']
log.info(f'Input Location External: {config["inputUriExternal"]}')
trainingStatus = 'False'
dataFileLocation = ''
driftStatus = 'No Drift'
if monitoring.exists():
monitoring_data = read_json(monitoring)
if monitoring_data.get('runNo', False):
reader = dataReader(reader_type=monitoring_data.get('prod_db_type','sqlite'),target_path=targetPath, config=config.get('db_config',None))
production= targetPath/IOFiles['production']
proddataDF = pd.DataFrame()
predicted_data = pd.DataFrame()
if production.exists():
production = read_json(production)
if reader.file_exists(IOFiles['prodData']) and reader.file_exists(IOFiles['prodDataGT']):
predicted_data = reader.read(IOFiles['prodData'])
actual_data = reader.read(IOFiles['prodDataGT'])
common_col = [k for k in predicted_data.columns.tolist() if k in actual_data.columns.tolist()]
proddataDF = pd.merge(actual_data, predicted_data, on =common_col,how = 'inner')
currentPerformance = {}
currentPerformance = get_metrices(proddataDF[config['target_feature']], proddataDF['prediction'])
if is_drift_within_limits(production, currentPerformance,config['scoring_criteria']):
log.info(f'OutputDrift: No output drift found')
output_json.update({'outputDrift':'Model score is with in limits'})
else:
log.info(f'OutputDrift: Found Output Drift')
log.info(f'Original Test Score: {production["score"]}')
log.info(f'Current Score: {currentPerformance[config["scoring_criteria"]]}')
                        output_json.update({'outputDrift':{'Message': 'Model output is drifted','trainedScore':production["score"], 'currentScore':currentPerformance[config["scoring_criteria"]]}})
trainingStatus = 'True'
driftStatus = 'Output Drift'
else:
if reader.file_exists(IOFiles['prodData']):
predicted_data = reader.read(IOFiles['prodData'])
log.info(f'OutputDrift: Prod Data not found')
output_json.update({'outputDrift':'Prod Data not found'})
else:
log.info(f'Last Time pipeline not executed completely')
output_json.update({'Msg':'Pipeline is not executed completely'})
trainingStatus = 'True'
if config['inputUriExternal']:
dataFileLocation = config['inputUriExternal']
elif 's3' in config.keys():
dataFileLocation = 'cloud'
else:
dataFileLocation = config['inputUri']
if trainingStatus == 'False':
historicaldataFrame=pd.read_csv(trainingDataLocation)
if config['inputUriExternal']:
currentdataFrame=pd.read_csv(config['inputUriExternal'])
elif not predicted_data.empty:
currentdataFrame = predicted_data.copy()
elif 's3' in config.keys():
reader = dataReader(reader_type='s3',target_path=config['targetPath'], config=config['s3'])
currentdataFrame = reader.read(config['s3']['file_name'])
            else:
currentdataFrame=pd.read_csv(config['inputUri'])
inputdriftObj = inputdrift(config)
dataalertcount,inputdrift_message = inputdriftObj.get_input_drift(currentdataFrame,historicaldataFrame)
if inputdrift_message == 'Model is working as expected':
log.info(f'InputDrift: No input drift found')
output_json.update({'Status':'SUCCESS','inputDrift':'Model is working as expected'})
else:
log.info(f'InputDrift: Input drift found')
log.info(f'Affected Columns {inputdrift_message}')
output_json.update({'inputDrift':{'Affected Columns':inputdrift_message}})
trainingStatus = 'True'
driftStatus = 'Input Drift'
if config['inputUriExternal']:
dataFileLocation = config['inputUriExternal']
                elif reader.file_exists(IOFiles['prodData']) and reader.file_exists(IOFiles['prodDataGT']):
dataFileLocation = ''
elif 's3' in config.keys():
dataFileLocation = 'cloud'
else:
dataFileLocation = config['inputUri']
else:
log.info(f'Pipeline Executing first Time')
output_json.update({'Msg':'Pipeline executing first time'})
trainingStatus = 'True'
if config['inputUriExternal']:
dataFileLocation = config['inputUriExternal']
elif 's3' in config.keys():
dataFileLocation = 'cloud'
else:
dataFileLocation = config['inputUri']
else:
log.info(f'Pipeline Executing first Time')
output_json.update({'Msg':'Pipeline executing first time'})
trainingStatus = 'True'
if config['inputUriExternal']:
dataFileLocation = config['inputUriExternal']
elif 's3' in config.keys():
dataFileLocation = 'cloud'
else:
dataFileLocation = config['inputUri']
if monitoring.exists():
monitoring_data['runNo'] = int(monitoring_data.get('runNo', '0')) + 1
else:
monitoring_data = {}
monitoring_data['runNo'] = 1
monitoring_data['prod_db_type'] = config.get('prod_db_type', 'sqlite')
monitoring_data['db_config'] = config.get('db_config', {})
monitoring_data['mlflow_config'] = config.get('mlflow_config', None)
if 's3' in config.keys():
monitoring_data['s3'] = config['s3']
monitoring_data['dataLocation'] = dataFileLocation
monitoring_data['driftStatus'] = driftStatus
write_json(monitoring_data,targetPath/IOFiles['monitoring'])
output = {'Status':'SUCCESS'}
output.update(output_json)
return(json.dumps(output))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--inputUri', help='Training Data Location')
args = parser.parse_args()
config_file = Path(__file__).parent/'config.json'
if not Path(config_file).exists():
raise ValueError(f'Config file is missing: {config_file}')
config = read_json(config_file)
config['inputUriExternal'] = None
if args.inputUri:
if args.inputUri != '':
config['inputUriExternal'] = args.inputUri
log = None
try:
print(monitoring(config, log))
except Exception as e:
if log:
log.error(e, exc_info=True)
status = {'Status':'Failure','Message':str(e)}
print(json.dumps(status))
raise Exception(str(e))
"""
return text<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class selector():
def __init__(self, indent=0, tab_size=4):
self.tab = " "*tab_size
self.codeText = f"\\n\\ndef featureSelector(log):"
self.pipe = 'pipe'
self.code_generated = False
self.input_files = {}
self.output_files = {}
self.function_code = ''
self.addInputFiles({'inputData' : 'transformedData.dat', 'metaData' : 'modelMetaData.json','log' : 'aion.log','outputData' : 'featureEngineeredData.dat'})
def addInputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.input_files[k] = v
def addOutputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
            self.output_files[k] = v
def getInputFiles(self):
text = 'IOFiles = '
if not self.input_files:
text += '{ }'
else:
text += json.dumps(self.input_files, indent=4)
return text
def getOutputFiles(self):
text = 'output_file = '
if not self.output_files:
text += '{ }'
else:
text += json.dumps(self.output_files, indent=4)
return text
def getInputOutputFiles(self, indent=0):
text = '\\n'
text += self.getInputFiles()
if indent:
text = text.replace('\\n', self.tab * indent + '\\n')
return text
def __addValidateConfigCode(self):
text = "\\n\\
\\ndef validateConfig():\\
\\n config_file = Path(__file__).parent/'config.json'\\
\\n if not Path(config_file).exists():\\
\\n raise ValueError(f'Config file is missing: {config_file}')\\
\\n config = read_json(config_file)\\
\\n return config"
return text
def addMainCode(self):
self.codeText += "\\n\\
\\nif __name__ == '__main__':\\
\\n log = None\\
\\n try:\\
\\n print(featureSelector(log))\\
\\n except Exception as e:\\
\\n if log:\\
\\n log.error(e, exc_info=True)\\
\\n status = {'Status':'Failure','Message':str(e)}\\
\\n print(json.dumps(status))\\
"
def addValidateConfigCode(self, indent=1):
self.function_code += self.__addValidateConfigCode()
def addStatement(self, statement, indent=1):
self.codeText += '\\n' + self.tab * indent + statement
def getCode(self):
return self.function_code + '\\n' + self.codeText
def addLocalFunctionsCode(self):
self.addValidateConfigCode()
def getPrefixModules(self):
modules = [{'module':'Path', 'mod_from':'pathlib'}
,{'module':'pandas', 'mod_as':'pd'}
]
return modules
def addPrefixCode(self, indent=1):
self.codeText += "\\
\\n config = validateConfig()\\
\\n targetPath = Path('aion')/config['targetPath']\\
\\n if not targetPath.exists():\\
\\n raise ValueError(f'targetPath does not exist')\\
\\n meta_data_file = targetPath/IOFiles['metaData']\\
\\n if meta_data_file.exists():\\
\\n meta_data = read_json(meta_data_file)\\
\\n else:\\
\\n raise ValueError(f'Configuration file not found: {meta_data_file}')\\
\\n log_file = targetPath/IOFiles['log']\\
\\n log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)\\
\\n dataLoc = targetPath/IOFiles['inputData']\\
\\n if not dataLoc.exists():\\
\\n return {'Status':'Failure','Message':'Data location does not exists.'}\\
\\n        return {'Status':'Failure','Message':'Data location does not exist.'}\\
\\n status = dict()\\
\\n df = pd.read_csv(dataLoc)\\
\\n prev_step_output = meta_data['transformation']"
def getSuffixModules(self):
modules = [{'module':'platform'}
,{'module':'time'}
]
return modules
def addSuffixCode(self, indent=1):
self.codeText += "\\n\\
\\n csv_path = str(targetPath/IOFiles['outputData'])\\
\\n write_data(df, csv_path,index=False)\\
\\n status = {'Status':'Success','DataFilePath':IOFiles['outputData'],'total_features':total_features, 'selected_features':selected_features}\\
\\n log.info(f'Selected data saved at {csv_path}')\\
\\n meta_data['featureengineering']['Status'] = status\\
\\n write_json(meta_data, str(targetPath/IOFiles['metaData']))\\
\\n log.info(f'output: {status}')\\
\\n return json.dumps(status)"
def getMainCodeModules(self):
modules = [{'module':'Path', 'mod_from':'pathlib'}
,{'module':'sys'}
,{'module':'json'}
,{'module':'logging'}
,{'module':'argparse'}
]
return modules
def add_variable(self, name, value, indent=1):
if isinstance(value, str):
self.codeText += f"\\n{self.tab * indent}{name} = '{value}'"
else:
self.codeText += f"\\n{self.tab * indent}{name} = {value}"
def addStatement(self, statement, indent=1):
self.codeText += f"\\n{self.tab * indent}{statement}"
    def modelBased(self, problem_type, indent=1):
        if problem_type == 'classification':
            self.codeText += f"\\n{self.tab * indent}selector = SelectFromModel(ExtraTreesClassifier())"
        if problem_type == 'regression':
            self.codeText += f"\\n{self.tab * indent}selector = SelectFromModel(Lasso())"
        self.codeText += f"\\n{self.tab * indent}selector.fit(df[train_features],df[target_feature])"
        self.codeText += f"\\n{self.tab * indent}selected_features = [x for x,y in zip(train_features, selector.get_support()) if y]"
        self.codeText += f"\\n{self.tab * indent}df = df[selected_features + [target_feature]]"
def featureReductionBased(self, reducer, n_components, indent=1):
if reducer == 'pca':
if n_components == 0:
self.codeText += f"\\n{self.tab * indent}pipe = Pipeline([('selector', PCA(n_components='mle',svd_solver = 'full'))])"
elif n_components < 1:
self.codeText += f"\\n{self.tab * indent}pipe = Pipeline([('selector', PCA(n_components={n_components},svd_solver = 'full'))])"
else:
self.codeText += f"\\n{self.tab * indent}pipe = Pipeline([('selector', PCA(n_components=int({n_components})))])"
self.codeText += "pipe.fit_transform(df)"
def getPipe(self):
return self.pipe
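
# ---------------------------------------------------------------------------
# Illustrative driver sketch (added for clarity; not part of the original
# source); mirrors the learner-style stitching with hypothetical arguments.
def _example_selector_usage():
    gen = selector()
    gen.addLocalFunctionsCode()        # validateConfig() helper
    gen.addPrefixCode()                # config, metadata and data loading preamble
    gen.modelBased('classification')   # or gen.featureReductionBased('pca', 0.95)
    gen.addSuffixCode()                # persist selected data and update metadata
    gen.addMainCode()
    return gen.getCode()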
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
from mlac.ml.core import *
from .utility import *
def run_output_drift(config):
importer = importModule()
drifter = output_drift(missing = get_variable('fillna', False), word2num_features= get_variable('word2num_features', False), cat_encoder = get_variable('cat_encoder', False),target_encoder = get_variable('target_encoder', False),normalizer = get_variable('normalizer', False),text_profiler = get_variable('text_features', False),feature_reducer = get_variable('feature_reducer', False),score_smaller_is_better = get_variable('smaller_is_better', False),problem_type=config['problem_type'])
function = global_function()
importer.addModule('sys')
importer.addModule('math')
importer.addModule('json')
importer.addModule('platform')
importer.addModule('joblib')
importer.addModule('mlflow')
importer.addModule('sklearn')
importer.addModule('numpy', mod_as='np')
importer.addModule('pandas', mod_as='pd')
importer.addModule('Path', mod_from='pathlib')
importer.addModule('InfluxDBClient', mod_from='influxdb')
function.add_function('readWrite')
code = file_header(config['modelName']+'_'+config['modelVersion'])
code += importer.getCode()
code += function.getCode()
drifter.generateCode()
code += drifter.getCode()
deploy_path = Path(config["deploy_path"])/'MLaC'/'OutputDrift'
deploy_path.mkdir(parents=True, exist_ok=True)
py_file = deploy_path/"output_drift.py"
with open(py_file, "w") as f:
f.write(code)
req_file = deploy_path/"requirements.txt"
with open(req_file, "w") as f:
f.write(importer.getBaseModule())
create_docker_file('output_drift', deploy_path)
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import shutil
from pathlib import Path
import json
from mlac.ml.core import *
from .utility import *
import tarfile
output_file_map = {
'text' : {'text' : 'text_profiler.pkl'},
'targetEncoder' : {'targetEncoder' : 'targetEncoder.pkl'},
'featureEncoder' : {'featureEncoder' : 'inputEncoder.pkl'},
'normalizer' : {'normalizer' : 'normalizer.pkl'}
}
def add_common_imports(importer):
common_importes = [
{'module': 'json', 'mod_from': None, 'mod_as': None},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'argparse', 'mod_from': None, 'mod_as': None},
{'module': 'platform', 'mod_from': None, 'mod_as': None }
]
for mod in common_importes:
importer.addModule(mod['module'], mod_from=mod['mod_from'], mod_as=mod['mod_as'])
def add_text_dependency():
return """nltk==3.6.3
textblob==0.15.3
spacy==3.1.3
demoji==1.1.0
bs4==0.0.1
text_unidecode==1.3
contractions==0.1.73
"""
def get_transformer_params(config):
param_keys = ["modelVersion","problem_type","target_feature","train_features","text_features","profiler","test_ratio"] #Bugid 13217
data = {key:value for (key,value) in config.items() if key in param_keys}
data['targetPath'] = config['modelName']
return data
def run_transformer(config):
transformer = profiler()
importer = importModule()
function = global_function()
importModules(importer, transformer.getPrefixModules())
importer.addModule('warnings')
transformer.addPrefixCode()
importer.addModule('train_test_split', mod_from='sklearn.model_selection')
if config["problem_type"] == 'classification':
importer.addModule('LabelEncoder', mod_from='sklearn.preprocessing')
transformer.addInputFiles({'targetEncoder':'targetEncoder.pkl'})
update_variable('target_encoder', True)
transformer.addStatement("train_data, test_data = train_test_split(df,stratify=df[target_feature],test_size=config['test_ratio'])",indent=2) #Bugid 13217
transformer.addStatement("profilerObj = profiler(xtrain=train_data, target=target_feature, encode_target=True, config=config['profiler'],log=log)") #Bugid 13217
else:
transformer.addStatement("train_data, test_data = train_test_split(df,test_size=config['test_ratio'])",indent=2)
transformer.addStatement("profilerObj = profiler(xtrain=train_data, target=target_feature, config=config['profiler'],log=log)")
importModules(importer, transformer.getSuffixModules())
importModules(importer, transformer.getMainCodeModules())
transformer.addSuffixCode( config["problem_type"] == 'classification')
transformer.addMainCode()
usecase = config['modelName']+'_'+config['modelVersion']
deploy_path = Path(config["deploy_path"])/'MLaC'/'DataTransformation'
deploy_path.mkdir(parents=True, exist_ok=True)
generated_files = []
# create the utility file
importer.addLocalModule('*', mod_from='utility')
utility_obj = utility_function('transformer')
with open(deploy_path/"utility.py", 'w') as f:
f.write(file_header(usecase) + utility_obj.get_code())
generated_files.append("utility.py")
# create empty init file to make a package
with open(deploy_path/"__init__.py", 'w') as f:
f.write(file_header(usecase))
generated_files.append("__init__.py")
# create the dataProfiler file
profiler_importer = importModule()
importer.addLocalModule('profiler', mod_from='dataProfiler')
profiler_obj = data_profiler(profiler_importer, True if config["text_features"] else False)
code_text = profiler_obj.get_code() # import statement will be generated when profiler_obj.get_code is called.
    # need to copy the data profiler from the AION code base: the code is split
    # across files, and merging it manually can introduce bugs. A better way to
    # find the imported modules is needed.
#aion_transformer = Path(__file__).parent.parent.parent.parent/'transformations'
aion_utilities = Path(__file__).parent.parent.parent.parent/'utilities' #added for non encryption --Usnish
(deploy_path/'transformations').mkdir(parents=True, exist_ok=True)
if not (aion_utilities/'transformations'/'dataProfiler.py').exists():
raise ValueError('Data profiler file removed from AION')
shutil.copy(aion_utilities/'transformations'/'dataProfiler.py',deploy_path/"dataProfiler.py")
shutil.copy(aion_utilities/'transformations'/'data_profiler_functions.py',deploy_path/"transformations"/"data_profiler_functions.py")
if (deploy_path/'text').exists():
shutil.rmtree(deploy_path/'text')
with tarfile.open(aion_utilities/'text.tar') as file:
file.extractall(deploy_path)
if (deploy_path/'utils').exists():
shutil.rmtree(deploy_path/'utils')
with tarfile.open(aion_utilities / 'utils.tar') as file:
file.extractall(deploy_path)
generated_files.append("dataProfiler.py")
generated_files.append("transformations")
generated_files.append("text")
generated_files.append("utils")
code = file_header(usecase)
code += "\\nimport os\\nos.path.abspath(os.path.join(__file__, os.pardir))\\n" #chdir to import from current dir
code += importer.getCode()
code += '\\nwarnings.filterwarnings("ignore")\\n'
code += transformer.getInputOutputFiles()
code += function.getCode()
transformer.addLocalFunctionsCode()
code += transformer.getCode()
with open(deploy_path/"aionCode.py", "w") as f:
f.write(code)
generated_files.append("aionCode.py")
with open(deploy_path/"requirements.txt", "w") as f:
req=importer.getBaseModule(extra_importers=[utility_obj.get_importer(), profiler_importer])
if config["text_features"]:
req += add_text_dependency()
f.write(req)
generated_files.append("requirements.txt")
config_file = deploy_path/"config.json"
config_data = get_transformer_params(config)
with open (config_file, "w") as f:
json.dump(config_data, f, indent=4)
generated_files.append("config.json")
create_docker_file('transformer', deploy_path,config['modelName'], generated_files,True if config["text_features"] else False)
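
# ---------------------------------------------------------------------------
# Illustrative config sketch (added for clarity; not part of the original
# source): the minimal keys run_transformer reads above, with hypothetical
# values. get_transformer_params() keeps only the keys listed in param_keys.
_example_transformer_config = {
    'modelName': 'usecase', 'modelVersion': '1',
    'problem_type': 'classification', 'target_feature': 'label',
    'train_features': ['f1', 'f2'], 'text_features': [],
    'profiler': {}, 'test_ratio': 0.2,
    'deploy_path': '/tmp/aion_deploy',
}
# run_transformer(_example_transformer_config)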
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
from mlac.ml.core import *
from .utility import *
def get_register_params(config, models):
param_keys = ["modelVersion","problem_type"]
data = {key:value for (key,value) in config.items() if key in param_keys}
data['targetPath'] = config['modelName']
data['models'] = models
return data
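# Illustrative sketch (hypothetical values): given a pipeline config and the list of
# trained model names, get_register_params() keeps only the registry-relevant keys, e.g.
#   get_register_params({'modelName': 'usecase1', 'modelVersion': '1',
#                        'problem_type': 'classification', 'deploy_path': '/tmp'},
#                       ['XGBClassifier_MLBased'])
#   -> {'modelVersion': '1', 'problem_type': 'classification',
#       'targetPath': 'usecase1', 'models': ['XGBClassifier_MLBased']}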
def run_register(config):
importer = importModule()
function = global_function()
registration = register(importer)
function.add_function('get_mlflow_uris')
models = get_variable('models_name')
smaller_is_better = get_variable('smaller_is_better', False)
registration.addClassCode(smaller_is_better)
registration.addLocalFunctionsCode(models)
registration.addPrefixCode()
registration.addMainCode(models)
importModules(importer, registration.getMainCodeModules())
importer.addModule('warnings')
generated_files = []
usecase = config['modelName']+'_'+config['modelVersion']
deploy_path = Path(config["deploy_path"])/'MLaC'/'ModelRegistry'
deploy_path.mkdir(parents=True, exist_ok=True)
# create the utility file
importer.addLocalModule('*', mod_from='utility')
utility_obj = utility_function('register')
with open(deploy_path/"utility.py", 'w') as f:
f.write(file_header(usecase) + utility_obj.get_code())
generated_files.append("utility.py")
# create empty init file required for creating a package
with open(deploy_path/"__init__.py", 'w') as f:
f.write(file_header(usecase))
generated_files.append("__init__.py")
code = registration.getImportCode()
code += '\\nwarnings.filterwarnings("ignore")\\n'
code += registration.getInputOutputFiles()
code += function.getCode()
code += registration.getCode()
# create serving file
with open(deploy_path/"aionCode.py", 'w') as f:
f.write(file_header(usecase) + code)
generated_files.append("aionCode.py")
# create requirements file
req_file = deploy_path/"requirements.txt"
with open(req_file, "w") as f:
req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()])
f.write(req)
generated_files.append("requirements.txt")
# create config file
with open (deploy_path/"config.json", "w") as f:
json.dump(get_register_params(config, models), f, indent=4)
generated_files.append("config.json")
# create docker file
    create_docker_file('register', deploy_path,config['modelName'], generated_files)
"""
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import datetime
from pathlib import Path
variables = {}
def init_variables():
global variables
variables = {}
def update_variable(name, value):
variables[name] = value
def get_variable(name, default=None):
return variables.get(name, default)
def append_variable(name, value):
data = get_variable(name)
if not data:
update_variable(name, [value])
elif not isinstance(data, list):
update_variable(name, [data, value])
else:
data.append(value)
update_variable(name, data)
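# Minimal usage sketch of this module-level variable registry (hypothetical values):
#   init_variables()
#   append_variable('models_name', 'LogisticRegression_MLBased')
#   append_variable('models_name', 'XGBClassifier_MLBased')
#   get_variable('models_name')               # -> ['LogisticRegression_MLBased', 'XGBClassifier_MLBased']
#   get_variable('smaller_is_better', False)  # -> False until a scorer updates it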
def addDropFeature(feature, features_list, coder, indent=1):
coder.addStatement(f'if {feature} in {features_list}:', indent=indent)
coder.addStatement(f'{features_list}.remove({feature})', indent=indent+1)
def importModules(importer, modules_list):
for module in modules_list:
mod_from = module.get('mod_from',None)
mod_as = module.get('mod_as',None)
importer.addModule(module['module'], mod_from=mod_from, mod_as=mod_as)
def file_header(use_case, module_name=None):
time_str = datetime.datetime.now().isoformat(timespec='seconds', sep=' ')
text = "#!/usr/bin/env python\\n# -*- coding: utf-8 -*-\\n"
return text + f"'''\\nThis file is automatically generated by AION for {use_case} usecase.\\nFile generation time: {time_str}\\n'''"
def get_module_mapping(module):
mapping = {
"LogisticRegression": {'module':'LogisticRegression', 'mod_from':'sklearn.linear_model'}
,"GaussianNB": {'module':'GaussianNB', 'mod_from':'sklearn.naive_bayes'}
,"DecisionTreeClassifier": {'module':'DecisionTreeClassifier', 'mod_from':'sklearn.tree'}
,"SVC": {'module':'SVC', 'mod_from':'sklearn.svm'}
,"KNeighborsClassifier": {'module':'KNeighborsClassifier', 'mod_from':'sklearn.neighbors'}
,"GradientBoostingClassifier": {'module':'GradientBoostingClassifier', 'mod_from':'sklearn.ensemble'}
,'RandomForestClassifier':{'module':'RandomForestClassifier','mod_from':'sklearn.ensemble'}
,'XGBClassifier':{'module':'XGBClassifier','mod_from':'xgboost'}
,'LGBMClassifier':{'module':'LGBMClassifier','mod_from':'lightgbm'}
,'CatBoostClassifier':{'module':'CatBoostClassifier','mod_from':'catboost'}
,"LinearRegression": {'module':'LinearRegression', 'mod_from':'sklearn.linear_model'}
,"Lasso": {'module':'Lasso', 'mod_from':'sklearn.linear_model'}
,"Ridge": {'module':'Ridge', 'mod_from':'sklearn.linear_model'}
,"DecisionTreeRegressor": {'module':'DecisionTreeRegressor', 'mod_from':'sklearn.tree'}
,'RandomForestRegressor':{'module':'RandomForestRegressor','mod_from':'sklearn.ensemble'}
,'XGBRegressor':{'module':'XGBRegressor','mod_from':'xgboost'}
,'LGBMRegressor':{'module':'LGBMRegressor','mod_from':'lightgbm'}
,'CatBoostRegressor':{'module':'CatBoostRegressor','mod_from':'catboost'}
}
return mapping.get(module, None)
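# Example lookup (values taken from the mapping above):
#   get_module_mapping('XGBClassifier')  # -> {'module': 'XGBClassifier', 'mod_from': 'xgboost'}
#   get_module_mapping('UnknownAlgo')    # -> None, so callers must handle the missing case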
def create_docker_file(name, path,usecasename,files=[],text_feature=False):
text = ""
if name == 'load_data':
text='FROM python:3.8-slim-buster'
text+='\\n'
text+='LABEL " |
usecase"="'+str(usecasename)+'"'
text+='\\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
for file in files:
text+=f'\\nCOPY {file} {file}'
text+='\\n'
text+='RUN pip install --no-cache-dir -r requirements.txt'
elif name == 'transformer':
text='FROM python:3.8-slim-buster\\n'
text+='LABEL "usecase"="'+str(usecasename)+'"'
text+='\\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
text+='\\n'
for file in files:
text+=f'\\nCOPY {file} {file}'
text+='\\n'
text+='''RUN \\
'''
text+=''' pip install --no-cache-dir -r requirements.txt\\
'''
if text_feature:
text += ''' && python -m nltk.downloader stopwords && python -m nltk.downloader punkt && python -m nltk.downloader wordnet && python -m nltk.downloader averaged_perceptron_tagger\\
'''
text+='\\n'
elif name == 'selector':
text='FROM python:3.8-slim-buster'
text+='\\n'
text+='LABEL "usecase"="'+str(usecasename)+'"'
text+='\\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
text+='\\n'
for file in files:
text+=f'\\nCOPY {file} {file}'
text+='\\n'
text+='RUN pip install --no-cache-dir -r requirements.txt'
elif name == 'train':
text='FROM python:3.8-slim-buster'
text+='\\n'
text+='LABEL "usecase"="'+str(usecasename)+'"'
text+='\\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
text+='\\n'
text+='COPY requirements.txt requirements.txt'
text+='\\n'
text+='COPY config.json config.json'
text+='\\n'
text+='COPY aionCode.py aionCode.py'
text+='\\n'
text+='COPY utility.py utility.py'
text+='\\n'
text+='RUN pip install --no-cache-dir -r requirements.txt'
elif name == 'register':
text='FROM python:3.8-slim-buster'
text+='\\n'
text+='LABEL "usecase"="'+str(usecasename)+'"'
text+='\\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
text+='\\n'
for file in files:
text+=f'\\nCOPY {file} {file}'
text+='\\n'
text+='RUN pip install --no-cache-dir -r requirements.txt'
elif name == 'Prediction':
text='FROM python:3.8-slim-buster'
text+='\\n'
text+='LABEL "usecase"="'+str(usecasename)+'"'
text+='\\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
text+='\\n'
for file in files:
text+=f'\\nCOPY {file} {file}'
text+='\\n'
text+='''RUN \\
'''
text+='''pip install --no-cache-dir -r requirements.txt\\
'''
if text_feature:
text += ''' && python -m nltk.downloader stopwords && python -m nltk.downloader punkt && python -m nltk.downloader wordnet && python -m nltk.downloader averaged_perceptron_tagger\\
'''
text+='\\n'
text+='ENTRYPOINT ["python", "aionCode.py","-ip","0.0.0.0","-pn","8094"]\\n'
elif name == 'input_drift':
text='FROM python:3.8-slim-buster'
text+='\\n'
text+='LABEL "usecase"="'+str(usecasename)+'"'
text+='\\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
text+='\\n'
for file in files:
text+=f'\\nCOPY {file} {file}'
text+='\\n'
text+='RUN pip install --no-cache-dir -r requirements.txt'
file_name = Path(path)/'Dockerfile'
with open(file_name, 'w') as f:
        f.write(text)
"""
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from .load_data import run_loader
from .transformer import run_transformer
from .selector import run_selector
from .trainer import run_trainer
from .register import run_register
from .deploy import run_deploy
from .drift_analysis import run_drift_analysis
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import shutil
from pathlib import Path
import json
from mlac.ml.core import *
from .utility import *
import tarfile
def add_text_dependency():
return """nltk==3.6.3
textblob==0.15.3
spacy==3.1.3
demoji==1.1.0
bs4==0.0.1
text_unidecode==1.3
contractions==0.1.73
"""
def get_deploy_params(config):
param_keys = ["modelVersion","problem_type","target_feature"]
data = {key:value for (key,value) in config.items() if key in param_keys}
data['targetPath'] = config['modelName']
data['ipAddress'] = '127.0.0.1'
data['portNo'] = '8094'
return data
def import_trainer_module(importer):
non_sklearn_modules = get_variable('non_sklearn_modules')
if non_sklearn_modules:
for mod in non_sklearn_modules:
module = get_module_mapping(mod)
mod_from = module.get('mod_from',None)
mod_as = module.get('mod_as',None)
importer.addModule(module['module'], mod_from=mod_from, mod_as=mod_as)
imported_modules = [
{'module': 'sys', 'mod_from': None, 'mod_as': None},
{'module': 'math', 'mod_from': None, 'mod_as': None},
{'module': 'json', 'mod_from': None, 'mod_as': None},
{'module': 'scipy', 'mod_from': None, 'mod_as': None},
{'module': 'joblib', 'mod_from': None, 'mod_as': None},
{'module': 'shutil', 'mod_from': None, 'mod_as': None},
{'module': 'mlflow', 'mod_from': None, 'mod_as': None},
{'module': 'sklearn', 'mod_from': None, 'mod_as': None},
{'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'argparse', 'mod_from': None, 'mod_as': None},
{'module': 'platform', 'mod_from': None, 'mod_as': None}
]
def run_deploy(config):
generated_files = []
importer = importModule()
deployer = deploy(target_encoder = get_variable('target_encoder', False),feature_reducer = get_variable('feature_reducer', False),score_smaller_is_better = get_variable('smaller_is_better', False))
function = global_function()
importModules(importer, imported_modules)
if get_variable('cat_encoder', False):
importer.addModule('category_encoders')
import_trainer_module(importer)
if get_variable('word2num_features'):
function.add_function('s2n', importer)
if get_variable('text_features'):
importer.addLocalModule('textProfiler', mod_from='text.textProfiler')
usecase = config['modelName']+'_'+config['modelVersion']
deploy_path = Path(config["deploy_path"])/'MLaC'/'ModelServing'
deploy_path.mkdir(parents=True, exist_ok=True)
# create the utility file
importer.addLocalModule('*', mod_from='utility')
utility_obj = utility_function('Prediction')
with open(deploy_path/"utility.py", 'w') as f:
f.write(file_header(usecase) + utility_obj.get_code())
generated_files.append("utility.py")
# create the production data reader file
importer.addLocalModule('*', mod_from='data_reader')
reader_obj = data_reader(['sqlite','influx'])
with open(deploy_path/"data_reader.py", 'w') as f:
f.write(file_header(usecase) + reader_obj.get_code())
generated_files.append("data_reader.py")
    # need to copy the data profiler from the AION code base; the code is split and merging it manually
    # can introduce bugs
aion_utilities = Path(__file__).parent.parent.parent.parent / 'utilities'
with tarfile.open(aion_utilities / 'text.tar') as file:
file.extractall(deploy_path)
if (deploy_path / 'utils').exists():
shutil.rmtree(deploy_path / 'utils')
with tarfile.open(aion_utilities / 'utils.tar') as file:
file.extractall(deploy_path )
generated_files.append("text")
generated_files.append("utils")
# create empty init file required for creating a package
with open(deploy_path/"__init__.py", 'w') as f:
f.write(file_header(usecase))
generated_files.append("__init__.py")
function.add_function('get_mlflow_uris')
code = file_header(usecase)
code += importer.getCode()
code += deployer.getInputOutputFiles()
code += function.getCode()
code += deployer.getCode()
# create prediction file
with open(deploy_path/"predict.py", 'w') as f:
f.write(code)
generated_files.append("predict.py")
# create groundtruth file
with open(deploy_path/"groundtruth.py", 'w') as f:
f.write(file_header(usecase) + deployer.getGroundtruthCode())
generated_files.append("groundtruth.py")
    # create service file
with open(deploy_path/"aionCode.py", 'w') as f:
f.write(file_header(usecase) + deployer.getServiceCode())
generated_files.append("aionCode.py")
importer.addModule('seaborn')
# create requirements file
req_file = deploy_path/"requirements.txt"
with open(req_file, "w") as f:
req=importer.getBaseModule(extra_importers=[utility_obj.get_importer(), reader_obj.get_importer()])
if config["text_features"]:
req += add_text_dependency()
f.write(req)
generated_files.append("requirements.txt")
# create config file
config_file = deploy_path/"config.json"
config_data = get_deploy_params(config)
with open (config_file, "w") as f:
json.dump(config_data, f, indent=4)
generated_files.append("config.json")
# create docker file
    create_docker_file('Prediction', deploy_path,config['modelName'], generated_files, True if config["text_features"] else False)
"""
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
from mlac.ml.core import *
from .utility import *
def get_model_name(algo, method):
if method == 'modelBased':
return algo + '_' + 'MLBased'
    elif method == 'statisticalBased':
return algo + '_' + 'StatisticsBased'
else:
return algo
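# Example (results follow directly from the branches above):
#   get_model_name('XGBClassifier', 'modelBased')       -> 'XGBClassifier_MLBased'
#   get_model_name('XGBClassifier', 'statisticalBased') -> 'XGBClassifier_StatisticsBased'
#   get_model_name('XGBClassifier', 'allFeatures')      -> 'XGBClassifier'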
def get_training_params(config, algo):
param_keys = ["modelVersion","problem_type","target_feature","train_features","scoring_criteria","test_ratio","optimization_param"]
data = {key:value for (key,value) in config.items() if key in param_keys}
data['algorithms'] = {algo: config['algorithms'][algo]}
data['targetPath'] = config['modelName']
return data
def addImporterLearner(model, importer):
module = get_module_mapping(model)
mod_from = module.get('mod_from',None)
mod_as = module.get('mod_as',None)
importer.addModule(module['module'], mod_from=mod_from, mod_as=mod_as)
if not get_variable('non_sklearn_modules'):
update_variable('non_sklearn_modules', [])
if 'sklearn' not in mod_from:
modules = get_variable('non_sklearn_modules')
modules.append(model)
update_variable('non_sklearn_modules', modules)
def addEvaluator(scorer_type, optimizer,trainer, importer):
trainer.addStatement("if not X_test.empty:")
if optimizer == 'genetic':
        trainer.addStatement('features = [x for i,x in enumerate(features) if grid.support_[i]]',indent=2)
trainer.addStatement('y_pred = estimator.predict(X_test[features])',indent=2)
if scorer_type == 'accuracy':
importer.addModule('accuracy_score', mod_from='sklearn.metrics')
trainer.addStatement(f"test_score = round(accuracy_score(y_test,y_pred),2) * 100",indent=2)
importer.addModule('confusion_matrix', mod_from='sklearn.metrics')
trainer.addStatement("log.info('Confusion Matrix:')",indent=2)
trainer.addStatement("log.info('\\\\n' + pd.DataFrame(confusion_matrix(y_test,y_pred)).to_string())",indent=2)
elif scorer_type == 'recall':
importer.addModule('recall_score', mod_from='sklearn.metrics')
trainer.addStatement(f"test_score = round(recall_score(y_test,y_pred,average='macro'),2) * 100",indent=2)
importer.addModule('confusion_matrix', mod_from='sklearn.metrics')
trainer.addStatement(f"log.info('Confusion Matrix:\\\\n')",indent=2)
trainer.addStatement(f'log.info(pd.DataFrame(confusion_matrix(y_test,y_pred)))',indent=2)
elif scorer_type == 'precision':
importer.addModule('precision_score', mod_from='sklearn.metrics')
trainer.addStatement(f"test_score = round(precision_score(y_test,y_pred,average='macro'),2) * 100",indent=2)
importer.addModule('confusion_matrix', mod_from='sklearn.metrics')
trainer.addStatement(f"log.info('Confusion Matrix:\\\\n')",indent=2)
trainer.addStatement(f'log.info(pd.DataFrame(confusion_matrix(y_test,y_pred)))',indent=2)
elif scorer_type == 'f1_score':
importer.addModule('f1_score', mod_from='sklearn.metrics')
trainer.addStatement(f"test_score = round(f1_score(y_test,y_pred,average='macro'),2) * 100",indent=2)
importer.addModule('confusion_matrix', mod_from='sklearn.metrics')
trainer.addStatement(f"log.info('Confusion Matrix:\\\\n')",indent=2)
trainer.addStatement(f'log.info(pd.DataFrame(confusion_matrix(y_test,y_pred)))',indent=2)
elif scorer_type == 'roc_auc':
importer.addModule('roc_auc_score', mod_from='sklearn.metrics')
trainer.addStatement("try:")
trainer.addStatement(f"test_score = round(roc_auc_score(y_test,y_pred),2) * 100", indent=3)
importer.addModule('confusion_matrix', mod_from='sklearn.metrics')
trainer.addStatement(f"log.info('Confusion Matrix:\\\\n')",indent=3)
trainer.addStatement(f'log.info(pd.DataFrame(confusion_matrix(y_test,y_pred)))',indent=3)
trainer.addStatement("except:")
trainer.addStatement("try:",indent=3)
trainer.addStatement("actual = pd.get_dummies(y_test)",indent=4)
trainer.addStatement("y_pred = pd.get_dummies(y_pred)",indent=4)
trainer.addStatement(f"test_score = round(roc_auc_score(y_test,y_pred,average='weighted', multi_class='ovr'),2) * 100", indent=3)
trainer.addStatement(f"log.info('Confusion Matrix:\\\\n')",indent=4)
trainer.addStatement(f'log.info(pd.DataFrame(confusion_matrix(y_test,y_pred)))',indent=4)
trainer.addStatement("except:",indent=3)
trainer.addStatement(f"test_score = 0.0", indent=4)
elif scorer_type == 'neg_mean_squared_error' or scorer_type == 'mse':
importer.addModule('mean_squared_error', mod_from='sklearn.metrics')
trainer.addStatement(f'test_score = round(mean_squared_error(y_test,y_pred),2)',indent=2)
update_variable('smaller_is_better', True)
elif scorer_type == 'neg_root_mean_squared_error' or scorer_type == 'rmse':
importer.addModule('mean_squared_error', mod_from='sklearn.metrics')
trainer.addStatement(f'test_score = round(mean_squared_error(y_test,y_pred,squared=False),2)',indent=2)
update_variable('smaller_is_better', True)
elif scorer_type == 'neg_mean_absolute_error' or scorer_type == 'mae':
importer.addModule('mean_absolute_error', mod_from='sklearn.metrics')
trainer.addStatement(f'test_score = round(mean_absolute_error(y_test,y_pred),2)',indent=2)
update_variable('smaller_is_better', True)
elif scorer_type == 'r2':
importer.addModule('r2_score', mod_from='sklearn.metrics')
trainer.addStatement(f'test_score = round(r2_score(y_test,y_pred),2)',indent=2)
def update_search_space(algo, config):
search_space = []
algoritms = config["algorithms"]
model = algo
params = algoritms[model]
model_dict = {model:get_module_mapping(model)['mod_from']}
d = {'algo': model_dict}
d['param'] = params
search_space.append(d)
config['search_space'] = search_space
def get_optimization(optimization, importer, function=None):
if optimization == 'grid':
importer.addModule('GridSearchCV', mod_from='sklearn.model_selection')
optimization = 'GridSearchCV'
elif optimization == 'random':
importer.addModule('RandomizedSearchCV', mod_from='sklearn.model_selection')
optimization = 'RandomizedSearchCV'
elif optimization == 'genetic':
importer.addModule('GeneticSelectionCV', mod_from='genetic_selection')
optimization = 'GeneticSelectionCV'
elif optimization == 'bayesopt':
optimization = 'BayesSearchCV'
function.add_function(optimization,importer)
return optimization
def scoring_criteria_reg(score_param):
scorer_mapping = {
'mse':'neg_mean_squared_error',
'rmse':'neg_root_mean_squared_error',
'mae':'neg_mean_absolute_error',
'r2':'r2'
}
return scorer_mapping.get(score_param, 'neg_mean_squared_error')
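# Example: user-facing regression scores map onto sklearn scorer strings,
# falling back to 'neg_mean_squared_error' for anything unrecognised:
#   scoring_criteria_reg('rmse')  # -> 'neg_root_mean_squared_error'
#   scoring_criteria_reg('mape')  # -> 'neg_mean_squared_error' (fallback)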
def addBalancing(balancingMethod, importer, code):
if balancingMethod == 'oversample':
importer.addModule('SMOTE', mod_from='imblearn.over_sampling')
code.addStatement("\\n # data balancing")
code.addStatement("X_train, y_train = SMOTE(sampling_strategy='auto', k_neighbors=1, random_state=100).fit_resample(X_train, y_train)")
if balancingMethod == 'undersample':
importer.addModule('TomekLinks', mod_from='imblearn.under_sampling')
code.addStatement("\\n # data balancing")
code.addStatement("X_train, y_train = TomekLinks().fit_resample(X_train, y_train)")
def run_trainer(base_config):
base_trainer = learner()
base_importer = importModule()
function = global_function()
base_importer.addModule('joblib')
base_importer.addModule('warnings')
base_importer.addModule('argparse')
base_importer.addModule('pandas', mod_as='pd')
base_importer.addModule('Path', mod_from='pathlib')
function.add_function('get_mlflow_uris')
function.add_function('mlflow_create_experiment')
importModules(base_importer,base_trainer.getPrefixModules())
base_trainer.addPrefixCode()
if base_config["algorithms"]:
base_trainer.add_train_test_split('train_features', 'target_feature', "config['test_ratio']")
if base_config["problem_type"] == 'classification':
if base_config["balancingMethod"]:
addBalancing(base_config["balancingMethod"],base_importer,base_trainer)
base_trainer.addStatement(f"log.info('Data balancing done')")
base_trainer.addStatement("\\n #select scorer")
if base_config["problem_type"] == 'classification':
function.add_function('scoring_criteria', base_importer)
base_trainer.addStatement("scorer = scoring_criteria(config['scoring_criteria'],config['problem_type'], df[target_feature].nunique())")
else:
base_config['scoring_criteria'] = scoring_criteria_reg(base_config['scoring_criteria'])
base_trainer.addStatement(f"scorer = config['scoring_criteria']")
base_trainer.addStatement(f"log.info('Scoring criteria: {base_config['scoring_criteria']}')")
feature_selector = []
if base_config['feature_reducer']:
feature_selector.append(base_config['feature_reducer'])
elif base_config['feature_selector']:
feature_selector = base_config['feature_selector']
for algo in base_config["algorithms"].keys():
for method in feature_selector:
trainer = learner()
importer = importModule()
trainer.copyCode(base_trainer)
importer.copyCode(base_importer)
config = base_config
usecase = config['modelName']+'_'+config['modelVersion']
addImporterLearner(algo, importer)
trainer.addStatement("\\n #Training model")
trainer.addStatement(f"log.info('Training {algo} for {method}')")
trainer.add_model_fit(algo, get_optimization(config["optimization"], importer, function), method, importer)
trainer.addStatement("\\n #model evaluation")
addEvaluator(config['scoring_criteria'],config["optimization"], trainer, importer)
function.add_function('mlflowSetPath')
function.add_function('logMlflow')
importModules(importer, trainer.getSuffixModules())
importModules(importer, trainer.getMainCodeModules())
if base_config["problem_type"] == 'classification':
function.add_function('classification_metrices', importer)
trainer.addStatement("metrices = get_classification_metrices(y_test,y_pred)",indent=2)
trainer.add_100_trainsize_code()
trainer.addStatement("metrices.update({'train_score': train_score, 'test_score':test_score})")
else:
function.add_function('regression_metrices', importer)
trainer.addStatement("metrices = get_regression_metrices(y_test,y_pred)",indent=2)
trainer.add_100_trainsize_code()
trainer.addStatement("metrices.update({'train_score': train_score, 'test_score':test_score})")
trainer.addSuffixCode()
trainer.addMainCode()
model_name = get_model_name(algo,method)
deploy_path = Path(config["deploy_path"])/'MLaC'/('ModelTraining'+'_' + model_name)
deploy_path.mkdir(parents=True, exist_ok=True)
generated_files = []
# create the utility file
importer.addLocalModule('*', mod_from='utility')
utility_obj = utility_function('train')
with open(deploy_path/"utility.py", 'w') as f:
f.write(file_header(usecase) + utility_obj.get_code())
generated_files.append("utility.py")
# create empty init file to make a package
with open(deploy_path/"__init__.py", 'w') as f:
f.write(file_header(usecase))
generated_files.append("__init__.py")
code = importer.getCode()
code += 'warnings.filterwarnings("ignore")\\n'
code += f"\\nmodel_name = '{model_name}'\\n"
append_variable('models_name',model_name)
out_files = {'log':f'{model_name}_aion.log','model':f'{model_name}_model.pkl','performance':f'{model_name}_performance.json','metaDataOutput':f'{model_name}_modelMetaData.json'}
trainer.addOutputFiles(out_files)
code += trainer.getInputOutputFiles()
code += function.getCode()
trainer.addLocalFunctionsCode()
code += trainer.getCode()
with open(deploy_path/"aionCode.py", "w") as f:
f.write(code)
generated_files.append("aionCode.py")
with open(deploy_path/"requirements.txt", "w") as f:
req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()])
f.write(req)
generated_files.append("requirements.txt")
with open (deploy_path/"config.json", "w") as f:
json.dump(get_training_params(config, algo), f, indent=4)
generated_files.append("config.json")
create_docker_file('train', deploy_path,config['modelName'], generated_files)
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
from mlac.ml.core import *
from .utility import *
def run_input_drift(config):
importer = importModule()
drifter = input_drift()
importer.addModule('sys')
importer.addModule('json')
importer.addModule('mlflow')
importer.addModule('platform')
importer.addModule('warnings')
importer.addModule('numpy', mod_as='np')
    importer.addModule('pandas', mod_as='pd')
    importer.addModule('stats', mod_from='scipy', mod_as='st')
importer.addModule('Path', mod_from='pathlib')
code = file_header(config['modelName']+'_'+config['modelVersion'])
code += importer.getCode()
drifter.generateCode()
code += drifter.getCode()
deploy_path = Path(config["deploy_path"])/'MLaC'/'InputDrift'
deploy_path.mkdir(parents=True, exist_ok=True)
py_file = deploy_path/"input_drift.py"
with open(py_file, "w") as f:
f.write(code)
req_file = deploy_path/"requirements.txt"
with open(req_file, "w") as f:
f.write(importer.getBaseModule())
    create_docker_file('input_drift', deploy_path, config['modelName'])
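# Hedged usage sketch (hypothetical config; the real one is assembled by the MLaC
# pipeline). run_input_drift() only needs the keys read above:
#   run_input_drift({'modelName': 'usecase1', 'modelVersion': '1',
#                    'deploy_path': '/tmp/aion'})
# which writes MLaC/InputDrift/{input_drift.py, requirements.txt, Dockerfile}.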
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
import platform
from mlac.ml.core import *
from .utility import *
imported_modules = [
{'module': 'json', 'mod_from': None, 'mod_as': None},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'argparse', 'mod_from': None, 'mod_as': None},
{'module': 'platform', 'mod_from': None, 'mod_as': None }
]
def get_load_data_params(config):
param_keys = ["modelVersion","problem_type","target_feature","selected_features"]
data = {key:value for (key,value) in config.items() if key in param_keys}
data['targetPath'] = config['modelName']
return data
def run_loader(config):
generated_files = []
importer = importModule()
loader = tabularDataReader()
importModules(importer, imported_modules)
usecase = config['modelName']+'_'+config['modelVersion']
deploy_path = Path(config["deploy_path"])/'MLaC'/'DataIngestion'
deploy_path.mkdir(parents=True, exist_ok=True)
# create the utility file
importer.addLocalModule('*', mod_from='utility')
utility_obj = utility_function('load_data')
with open(deploy_path/"utility.py", 'w') as f:
f.write(file_header(usecase) + utility_obj.get_code())
generated_files.append("utility.py")
# create the production data reader file
importer.addLocalModule('dataReader', mod_from='data_reader')
readers = ['sqlite','influx']
if 's3' in config.keys():
readers.append('s3')
reader_obj = data_reader(readers)
with open(deploy_path/"data_reader.py", 'w') as f:
f.write(file_header(usecase) + reader_obj.get_code())
generated_files.append("data_reader.py")
# create empty init file to make a package
with open(deploy_path/"__init__.py", 'w') as f:
f.write(file_header(usecase))
generated_files.append("__init__.py")
code = file_header(usecase)
code += importer.getCode()
code += loader.getInputOutputFiles()
code += loader.getCode()
with open(deploy_path/"aionCode.py", "w") as f:
f.write(code)
generated_files.append("aionCode.py")
with open(deploy_path/"requirements.txt", "w") as f:
req=importer.getBaseModule(extra_importers=[utility_obj.get_importer(), reader_obj.get_importer()])
f.write(req)
generated_files.append("requirements.txt")
config_file = deploy_path/"config.json"
config_data = get_load_data_params(config)
with open (config_file, "w") as f:
json.dump(config_data, f, indent=4)
generated_files.append("config.json")
    create_docker_file('load_data', deploy_path,config['modelName'],generated_files)
"""
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
from mlac.ml.core import *
from .utility import *
imported_modules = [
{'module': 'sys', 'mod_from': None, 'mod_as': None},
{'module': 'json', 'mod_from': None, 'mod_as': None},
{'module': 'math', 'mod_from': None, 'mod_as': None},
{'module': 'joblib', 'mod_from': None, 'mod_as': None},
{'module': 'mlflow', 'mod_from': None, 'mod_as': None},
{'module': 'sklearn', 'mod_from': None, 'mod_as': None},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None},
{'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'argparse', 'mod_from': None, 'mod_as': None},
{'module': 'stats', 'mod_from': 'scipy', 'mod_as': 'st'},
{'module': 'platform', 'mod_from': None, 'mod_as': None }
]
def get_drift_params(config):
param_keys = ["modelVersion","problem_type","target_feature","selected_features","scoring_criteria","s3"]
data = {key:value for (key,value) in config.items() if key in param_keys}
usecase = config['modelName']
data['targetPath'] = usecase
if config['dataLocation'] != '':
data['inputUri'] = config['dataLocation']
else:
data['inputUri'] = '<input datalocation>'
data['prod_db_type'] = config.get('prod_db_type', 'sqlite')
data['db_config'] = config.get('db_config', {})
data['mlflow_config'] = config.get('mlflow_config', {'artifacts_uri':'','tracking_uri_type':'','tracking_uri':'','registry_uri':''})
return data
def run_drift_analysis(config):
init_variables()
importer = importModule()
function = global_function()
drifter = drift()
importModules(importer, imported_modules)
usecase = config['modelName']+'_'+config['modelVersion']
deploy_path = Path(config["deploy_path"])/'MLaC'/'ModelMonitoring'
deploy_path.mkdir(parents=True, exist_ok=True)
generated_files = []
# create the utility file
importer.addLocalModule('*', mod_from='utility')
utility_obj = utility_function('drift')
with open(deploy_path/"utility.py", 'w') as f:
f.write(file_header(usecase) + utility_obj.get_code())
generated_files.append("utility.py")
# create the production data reader file
importer.addLocalModule('dataReader', mod_from='data_reader')
readers = ['sqlite','influx']
if 's3' in config.keys():
readers.append('s3')
reader_obj = data_reader(readers)
with open(deploy_path/"data_reader.py", 'w') as f:
f.write(file_header(usecase) + reader_obj.get_code())
generated_files.append("data_reader.py")
# create empty init file to make a package
with open(deploy_path/"__init__.py", 'w') as f:
f.write(file_header(usecase))
generated_files.append("__init__.py")
importer.addLocalModule('inputdrift', mod_from='input_drift')
code = file_header(usecase)
code += importer.getCode()
code += drifter.getInputOutputFiles()
code += function.getCode()
code += drifter.get_main_drift_code(config['problem_type'], get_variable('smaller_is_better', False))
with open(deploy_path/"aionCode.py", "w") as f:
f.write(code)
generated_files.append("aionCode.py")
input_drift_importer = importModule()
importModules(input_drift_importer, drifter.get_input_drift_import_modules())
code = file_header(usecase)
code += input_drift_importer.getCode()
code += drifter.get_input_drift_code()
with open(deploy_path/"input_drift.py", "w") as f:
f.write(code)
generated_files.append("input_drift.py")
with open (deploy_path/"config.json", "w") as f:
json.dump(get_drift_params(config), f, indent=4)
generated_files.append("config.json")
req_file = deploy_path/"requirements.txt"
with open(req_file, "w") as f:
f.write(importer.getBaseModule(extra_importers=[utility_obj.get_importer(), reader_obj.get_importer(), input_drift_importer]))
generated_files.append("requirements.txt")
create_docker_file('input_drift', deploy_path,config['modelName'], generated_files)
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
import platform
from mlac.ml.core import *
from .utility import *
output_file_map = {
'feature_reducer' : {'feature_reducer' : 'feature_reducer.pkl'}
}
def get_selector_params(config):
param_keys = ["modelVersion","problem_type","target_feature","train_features","cat_features","n_components"]
data = {key:value for (key,value) in config.items() if key in param_keys}
data['targetPath'] = config['modelName']
return data
def run_selector(config):
select = selector()
importer = importModule()
function = global_function()
importModules(importer,select.getPrefixModules())
select.addPrefixCode()
if config["target_feature"] in config["train_features"]:
config["train_features"].remove(config["target_feature"])
select.addStatement("train_features = df.columns.tolist()")
select.addStatement("target_feature = config['target_feature']")
select.addStatement("train_features.remove(target_feature)")
select.addStatement("cat_features = prev_step_output['cat_features']")
select.add_variable('total_features',[])
select.addStatement("log.log_dataframe(df)")
methods = config.get("feature_selector", None)
feature_reducer = config.get("feature_reducer", None)
select.addStatement("selected_features = {}")
select.addStatement("meta_data['featureengineering']= {}")
if feature_reducer:
update_variable('feature_reducer', True)
select.addStatement(f"log.info('Running dimensionality reduction technique( {feature_reducer})')")
if feature_reducer == 'pca':
importer.addModule('PCA', mod_from='sklearn.decomposition')
if int(config["n_components"]) == 0:
select.addStatement("dimension_reducer = PCA(n_components='mle',svd_solver = 'full')")
elif int(config["n_components"]) < 1:
select.addStatement("dimension_reducer = PCA(n_components=config['n_components'],svd_solver = 'full')")
else:
select.addStatement("dimension_reducer = PCA(n_components=config['n_components'])")
elif feature_reducer == 'svd':
importer.addModule('TruncatedSVD', mod_from='sklearn.decomposition')
if config["n_components"] < 2:
config["n_components"] = 2
select.addStatement("dimension_reducer = TruncatedSVD(n_components=config['n_components'], n_iter=7, random_state=42)")
elif feature_reducer == 'factoranalysis':
importer.addModule('FactorAnalysis', mod_from='sklearn.decomposition')
if config["n_components"] == 0:
select.addStatement("dimension_reducer = FactorAnalysis()")
else:
select.addStatement("dimension_reducer = FactorAnalysis(n_components=config['n_components'])")
elif feature_reducer == 'ica':
importer.addModule('FastICA', mod_from='sklearn.decomposition')
if config["n_components"] == 0:
select.addStatement("dimension_reducer = FastICA()")
else:
select.addStatement("dimension_reducer = FastICA(n_components=config['n_components'])")
select.addStatement("pca_array = dimension_reducer.fit_transform(df[train_features])")
select.addStatement("pca_columns = ['pca_'+str(e) for e in list(range(pca_array.shape[1]))]")
select.addStatement("scaledDF = pd.DataFrame(pca_array, columns=pca_columns)")
select.addStatement("scaledDF[target_feature] = df[target_feature]")
select.addStatement("df = scaledDF")
select.addStatement(f"selected_features['{feature_reducer}'] = pca_columns")
select.addStatement("total_features = df.columns.tolist()")
select.addStatement("meta_data['featureengineering']['feature_reducer']= {}")
select.addStatement("reducer_file_name = str(targetPath/IOFiles['feature_reducer'])")
importer.addModule('joblib')
select.addStatement("joblib.dump(dimension_reducer, reducer |
_file_name)")
select.addStatement("meta_data['featureengineering']['feature_reducer']['file']= IOFiles['feature_reducer']")
select.addStatement("meta_data['featureengineering']['feature_reducer']['features']= train_features")
select.addOutputFiles(output_file_map['feature_reducer'])
elif methods:
if 'allFeatures' in methods:
addDropFeature('target_feature', 'train_features', select)
select.addStatement("selected_features['allFeatures'] = train_features")
if 'modelBased' in methods:
select.addStatement(f"log.info('Model Based Correlation Analysis Start')")
select.addStatement("model_based_feat = []")
importer.addModule('numpy', mod_as='np')
importer.addModule('RFE', mod_from='sklearn.feature_selection')
importer.addModule('MinMaxScaler', mod_from='sklearn.preprocessing')
if config["problem_type"] == 'classification':
importer.addModule('ExtraTreesClassifier', mod_from='sklearn.ensemble')
select.addStatement("estimator = ExtraTreesClassifier(n_estimators=100)")
else:
importer.addModule('Lasso', mod_from='sklearn.linear_model')
select.addStatement("estimator = Lasso()")
select.addStatement("estimator.fit(df[train_features],df[target_feature])")
select.addStatement("rfe = RFE(estimator, n_features_to_select=1, verbose =0 )")
select.addStatement("rfe.fit(df[train_features],df[target_feature])")
select.addStatement("ranks = MinMaxScaler().fit_transform(-1*np.array([list(map(float, rfe.ranking_))]).T).T[0]")
select.addStatement("ranks = list(map(lambda x: round(x,2), ranks))")
select.addStatement("for item, rank in zip(df.columns,ranks):")
select.addStatement("if rank > 0.30:", indent=2)
select.addStatement("model_based_feat.append(item)", indent=3)
addDropFeature('target_feature', 'model_based_feat', select)
select.addStatement("selected_features['modelBased'] = model_based_feat")
select.addStatement(f"log.info(f'Highly Correlated Features : {{model_based_feat}}')")
if 'statisticalBased' in methods:
select.addStatement(f"log.info('Statistical Based Correlation Analysis Start')")
function.add_function('start_reducer',importer)
select.addStatement(f"features = start_reducer(df, target_feature, {config['corr_threshold']},{config['var_threshold']})")
select.addStatement("train_features = [x for x in features if x in train_features]")
select.addStatement("cat_features = [x for x in cat_features if x in features]")
select.addStatement("numeric_features = df[features].select_dtypes('number').columns.tolist()")
if config["problem_type"] == 'classification':
function.add_function('feature_importance_class')
select.addStatement(f"statistics_based_feat = feature_importance_class(df[features], numeric_features, cat_features, target_feature, {config['pValueThreshold']},{config['corr_threshold']})")
else:
function.add_function('feature_importance_reg')
select.addStatement(f"statistics_based_feat = feature_importance_reg(df[features], numeric_features, target_feature, {config['pValueThreshold']},{config['corr_threshold']})")
addDropFeature('target_feature', 'statistics_based_feat', select)
select.addStatement("selected_features['statisticalBased'] = statistics_based_feat")
select.addStatement(f"log.info('Highly Correlated Features : {{statistics_based_feat}}')")
select.addStatement("total_features = list(set([x for y in selected_features.values() for x in y] + [target_feature]))")
select.addStatement(f"df = df[total_features]")
select.addStatement("log.log_dataframe(df)")
select.addSuffixCode()
importModules(importer, select.getSuffixModules())
importModules(importer, select.getMainCodeModules())
select.addMainCode()
generated_files = []
usecase = config['modelName']+'_'+config['modelVersion']
deploy_path = Path(config["deploy_path"])/'MLaC'/'FeatureEngineering'
deploy_path.mkdir(parents=True, exist_ok=True)
# create the utility file
importer.addLocalModule('*', mod_from='utility')
utility_obj = utility_function('selector')
with open(deploy_path/"utility.py", 'w') as f:
f.write(file_header(usecase) + utility_obj.get_code())
generated_files.append("utility.py")
# create empty init file to make a package
with open(deploy_path/"__init__.py", 'w') as f:
f.write(file_header(usecase))
generated_files.append("__init__.py")
code = file_header(usecase)
code += importer.getCode()
code += select.getInputOutputFiles()
code += function.getCode()
select.addLocalFunctionsCode()
code += select.getCode()
with open(deploy_path/"aionCode.py", "w") as f:
f.write(code)
generated_files.append("aionCode.py")
with open(deploy_path/"requirements.txt", "w") as f:
req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()])
f.write(req)
generated_files.append("requirements.txt")
config_file = deploy_path/"config.json"
config_data = get_selector_params(config)
with open (config_file, "w") as f:
json.dump(config_data, f, indent=4)
generated_files.append("config.json")
    create_docker_file('selector', deploy_path,config['modelName'], generated_files)
"""
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from .imports import importModule
supported_reader = ['sqlite', 'influx','s3']
functions_code = {
'dataReader':{'imports':[{'mod':'json'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'}],'code':"""
class dataReader():
def get_reader(self, reader_type, target_path=None, config=None):
if reader_type == 'sqlite':
return sqlite_writer(target_path=target_path)
elif reader_type == 'influx':
return Influx_writer(config=config)
elif reader_type == 'gcs':
return gcs(config=config)
elif reader_type == 'azure':
return azure(config=config)
elif reader_type == 's3':
return s3bucket(config=config)
else:
raise ValueError(reader_type)
"""
},
'sqlite':{'imports':[{'mod':'sqlite3'},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None}],'code':"""\\n\\
class sqlite_writer():
def __init__(self, target_path):
self.target_path = Path(target_path)
database_file = self.target_path.stem + '.db'
self.db = sqlite_db(self.target_path, database_file)
def file_exists(self, file):
if file:
return self.db.table_exists(file)
else:
return False
def read(self, file):
return self.db.read(file)
def write(self, data, file):
self.db.write(data, file)
def close(self):
self.db.close()
class sqlite_db():
def __init__(self, location, database_file=None):
if not isinstance(location, Path):
location = Path(location)
if database_file:
self.database_name = database_file
else:
self.database_name = location.stem + '.db'
db_file = str(location/self.database_name)
self.conn = sqlite3.connect(db_file)
self.cursor = self.conn.cursor()
self.tables = []
def table_exists(self, name):
if name in self.tables:
return True
elif name:
query = f"SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';"
listOfTables = self.cursor.execute(query).fetchall()
if len(listOfTables) > 0 :
self.tables.append(name)
return True
return False
def read(self, table_name):
return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn)
def create_table(self,name, columns, dtypes):
query = f'CREATE TABLE IF NOT EXISTS {name} ('
for column, data_type in zip(columns, dtypes):
query += f"'{column}' TEXT,"
query = query[:-1]
query += ');'
self.conn.execute(query)
return True
def write(self,data, table_name):
if not self.table_exists(table_name):
self.create_table(table_name, data.columns, data.dtypes)
tuple_data = list(data.itertuples(index=False, name=None))
insert_query = f'INSERT INTO {table_name} VALUES('
for i in range(len(data.columns)):
insert_query += '?,'
insert_query = insert_query[:-1] + ')'
self.cursor.executemany(insert_query, tuple_data)
self.conn.commit()
return True
def delete(self, name):
pass
def close(self):
self.conn.close()
"""
},
'influx':{'imports':[{'mod':'InfluxDBClient','mod_from':'influxdb'},{'mod':'datetime','mod_from':'datetime'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'}],'code':"""\\n\\
class Influx_writer():
    def __init__(self, config):
        self.db_config = config
        self.db = influx_db(config)
def file_exists(self, file):
if file:
return self.db.table_exists(file)
else:
return False
def read(self, file):
query = "SELECT * FROM {}".format(file)
if 'read_time' in self.db_config.keys() and self.db_config['read_time']:
query += f" time > now() - {self.db_config['read_time']}"
return self.db.read(query)
def write(self, data, file):
self.db.write(data, file)
def close(self):
pass
class influx_db():
def __init__(self, config):
self.host = config['host']
self.port = config['port']
self.user = config.get('user', None)
self.password = config.get('password', None)
self.token = config.get('token', None)
self.database = config['database']
self.measurement = config['measurement']
self.tags = config['tags']
self.client = self.get_client()
    def table_exists(self, name):
        query = f"SHOW MEASUREMENTS ON {self.database}"
        result = self.client.query(query)
        for measurement in result.get_points():
            if measurement['name'] == name:
                return True
        return False
def read(self, query)->pd.DataFrame:
cursor = self.client.query(query)
points = cursor.get_points()
my_list=list(points)
df=pd.DataFrame(my_list)
return df
def get_client(self):
headers = None
if self.token:
headers={"Authorization": self.token}
client = InfluxDBClient(self.host,self.port,self.user, self.password,headers=headers)
databases = client.get_list_database()
databases = [x['name'] for x in databases]
if self.database not in databases:
client.create_database(self.database)
return InfluxDBClient(self.host,self.port,self.user,self.password,self.database,headers=headers)
def write(self,data, measurement=None):
if isinstance(data, pd.DataFrame):
sorted_col = data.columns.tolist()
sorted_col.sort()
data = data[sorted_col]
data = data.to_dict(orient='records')
if not measurement:
measurement = self.measurement
for row in data:
if 'time' in row.keys():
p = '%Y-%m-%dT%H:%M:%S.%fZ'
time_str = datetime.strptime(row['time'], p)
del row['time']
else:
time_str = None
if 'model_ver' in row.keys():
self.tags['model_ver']= row['model_ver']
del row['model_ver']
json_body = [{
'measurement': measurement,
'time': time_str,
'tags': self.tags,
'fields': row
}]
self.client.write_points(json_body)
def delete(self, name):
pass
def close(self):
self.client.close()
"""
},
's3':{'imports':[{'mod':'boto3'},{'mod': 'ClientError', 'mod_from': 'botocore.exceptions'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':"""\\n\\
class s3bucket():
def __init__(self, config={}):
if 's3' in config.keys():
config = config['s3']
        aws_access_key_id = config.get('aws_access_key_id','')
        aws_secret_access_key = config.get('aws_secret_access_key','')
bucket_name = config.get('bucket_name','')
if not aws_access_key_id:
raise ValueError('aws_access_key_id can not be empty')
if not aws_secret_access_key:
raise ValueError('aws_secret_access_key can not be empty')
self.client = boto3.client('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=str(aws_secret_access_key))
self.bucket_name = bucket_name
def read(self, file_name):
try:
response = self.client.get_object(Bucket=self.bucket_name, Key=file_name)
return pd.read_csv(response['Body'])
except ClientError as ex:
if ex.response['Error']['Code'] == 'NoSuchBucket':
raise ValueError(f"Bucket '{self.bucket_name}' not found in aws s3 storage")
elif ex.response['Error']['Code'] == 'NoSuchKey':
raise ValueError(f"File '{file_name}' not found in s3 bucket '{self.bucket_name}'")
else:
raise
"""
},
'azure':{'imports':[{'mod':'DataLakeServiceClient', 'mod_from':'azure.storage.filedatalake'},{'mod':'detect', 'mod_from':'detect_delimiter'},{'mod':'pandavro', 'mod_as':'pdx'},{'mod':'io'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':"""\\n\\
class azure():
def __init__(self,config={}):
if 'azure' in config.keys():
config = config['azure']
account_name = config.get('account_name','')
account_key = config.get('account_key','')
container_name = config.get('container_name','')
if not account_name:
raise ValueError('Account name can not be empty')
if not account_key:
raise ValueError('Account key can not be empty')
if not container_name:
raise ValueError('Container name can not be empty')
service_client = DataLakeServiceClient(account_url="{}://{}.dfs.core.windows.net".format("https", account_name), credential=account_key)
self.file_system_client = service_client.get_file_system_client(container_name)
def read(self, directory_name):
root_dir = str(directory_name)
file_paths = self.file_system_client.get_paths(path=root_dir)
main_df = pd.DataFrame()
for path in file_paths:
if not path.is_directory:
                file_client = self.file_system_client.get_file_client(path.name)
file_ext = Path(path.name).suffix
if file_ext in [".csv", ".tsv"]:
with open(csv_local, "wb") as my_file:
file_client.download_file().readinto(my_file)
with open(csv_local, 'r') as file:
data = file.read()
row_delimiter = detect(text=data, default=None, whitelist=[',', ';', ':', '|', '\\t'])
processed_df = pd.read_csv(csv_local, sep=row_delimiter)
elif file_ext == ".parquet":
stream = io.BytesIO()
file_client.download_file().readinto(stream)
processed_df = pd.read_parquet(stream, engine='pyarrow')
elif file_ext == ".avro":
with open(avro_local, "wb") as my_file:
file_client.download_file().readinto(my_file)
processed_df = pdx.read_avro(avro_local)
if main_df.empty:
main_df = pd.DataFrame(processed_df)
else:
                    main_df = pd.concat([main_df, processed_df], ignore_index=True)
return main_df
"""
},
'gcs':{'imports':[{'mod':'storage','mod_from':'google.cloud'},{'mod':'io'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':"""\\n\\
class gcs():
def __init__(self, config={}):
if 'gcs' in config.keys():
config = config['gcs']
account_key = config.get('account_key','')
bucket_name = config.get('bucket_name','')
if not account_key:
raise ValueError('Account key can not be empty')
if not bucket_name:
raise ValueError('bucket name can not be empty')
storage_client = storage.Client.from_service_account_json(account_key)
self.bucket = storage_client.get_bucket(bucket_name)
    def read(self, file_name):
        data = self.bucket.blob(file_name).download_as_text()
        return pd.read_csv(io.StringIO(data), encoding = 'utf-8', sep = ',')
"""
}
}
class data_reader():
def __init__(self, reader_type=[]):
self.supported_readers = supported_reader
if isinstance(reader_type, str):
self.readers = [reader_type]
elif not reader_type:
self.readers = self.supported_readers
else:
self.readers = reader_type
unsupported_reader = [ x for x in self.readers if x not in self.supported_readers]
if unsupported_reader:
raise ValueError(f"reader type '{unsupported_reader}' is not supported\\nSupported readers are {self.supported_readers}")
self.codeText = ""
self.importer = importModule()
def get_reader_code(self, readers):
reader_code = {
'sqlite': 'return sqlite_writer(target_path=target_path)',
'influx': 'return Influx_writer(config=config)',
'gcs': 'return gcs(config=config)',
'azure': 'return azure(config=config)',
's3': 'return s3bucket(config=config)'
}
code = "\\n\\ndef dataReader(reader_type, target_path=None, config=None):\\n"
for i, reader in enumerate(readers):
if not i:
code += f" if reader_type == '{reader}':\\n"
else:
code += f" elif reader_type == '{reader}':\\n"
code += f" {reader_code[reader]}\\n"
if readers:
code += " else:\\n"
code += f""" raise ValueError("'{{reader_type}}' not added during code generation")\\n"""
else:
code += f""" raise ValueError("'{{reader_type}}' not added during code generation")\\n"""
return code
    def get_code(self):
        code = self.get_reader_code(self.readers)
        for function in self.readers:
            code += self.get_function_code(function)
        self.codeText += self.importer.getCode()
        self.codeText += code
        return self.codeText
def get_function_code(self, name):
code = ""
if name in functions_code.keys():
code += functions_code[name]['code']
if self.importer:
if 'imports' in functions_code[name].keys():
for module in functions_code[name]['imports']:
mod_name = module['mod']
mod_from = module.get('mod_from', None)
mod_as = module.get('mod_as', None)
self.importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as)
return code
def get_importer(self):
return self.importer
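# Illustrative usage sketch (assumption: the module-level `supported_reader`
# list and `importModule` referenced above are defined earlier in this file).
# The printed text is generated Python source for the selected readers; this
# does not contact any cloud service.
if __name__ == '__main__':
    generator = data_reader(reader_type=['gcs', 'azure'])
    print(generator.get_code())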
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
class output_drift():
def __init__(self, missing=False, word2num_features = None, cat_encoder=False, target_encoder=False, normalizer=False, text_profiler=False, feature_reducer=False, score_smaller_is_better=True, problem_type='classification', tab_size=4):
self.tab = ' ' * tab_size
self.codeText = ''
self.missing = missing
self.word2num_features = word2num_features
self.cat_encoder = cat_encoder
self.target_encoder = target_encoder
self.normalizer = normalizer
self.text_profiler = text_profiler
self.feature_reducer = feature_reducer
self.score_smaller_is_better = score_smaller_is_better
self.problem_type = problem_type
def addDatabaseClass(self, indent=0):
text = "\\
\\nclass database():\\
\\n def __init__(self, config):\\
\\n self.host = config['host']\\
\\n self.port = config['port']\\
\\n self.user = config['user']\\
\\n self.password = config['password']\\
\\n self.database = config['database']\\
\\n self.measurement = config['measurement']\\
\\n self.tags = config['tags']\\
\\n self.client = self.get_client()\\
\\n\\
\\n def read_data(self, query)->pd.DataFrame:\\
\\n cursor = self.client.query(query)\\
\\n points = cursor.get_points()\\
\\n my_list=list(points)\\
\\n df=pd.DataFrame(my_list)\\
\\n return df\\
\\n\\
\\n def get_client(self):\\
\\n client = InfluxDBClient(self.host,self.port,self.user,self.password)\\
\\n databases = client.get_list_database()\\
\\n databases = [x['name'] for x in databases]\\
\\n if self.database not in databases:\\
\\n client.create_database(self.database)\\
\\n return InfluxDBClient(self.host,self.port,self.user,self.password, self.database)\\
\\n\\
\\n def write_data(self,data):\\
\\n if isinstance(data, pd.DataFrame):\\
\\n sorted_col = data.columns.tolist()\\
\\n sorted_col.sort()\\
\\n data = data[sorted_col]\\
\\n data = data.to_dict(orient='records')\\
\\n for row in data:\\
\\n if 'time' in row.keys():\\
\\n p = '%Y-%m-%dT%H:%M:%S.%fZ'\\
\\n time_str = datetime.strptime(row['time'], p)\\
\\n del row['time']\\
\\n else:\\
\\n time_str = None\\
\\n if 'model_ver' in row.keys():\\
\\n self.tags['model_ver']= row['model_ver']\\
\\n del row['model_ver']\\
\\n json_body = [{\\
\\n 'measurement': self.measurement,\\
\\n 'time': time_str,\\
\\n 'tags': self.tags,\\
\\n 'fields': row\\
\\n }]\\
\\n self.client.write_points(json_body)\\
\\n\\
\\n def close(self):\\
\\n self.client.close()\\
\\n"
if indent:
text = text.replace('\\n', (self.tab * indent) + '\\n')
return text
def addPredictClass(self, indent=0):
text = "\\
\\nclass predict():\\
\\n\\
\\n def __init__(self, base_config):\\
\\n self.usecase = base_config['modelName'] + '_' + base_config['modelVersion']\\
\\n self.dataLocation = base_config['dataLocation']\\
\\n self.db_enabled = base_config.get('db_enabled', False)\\
\\n if self.db_enabled:\\
\\n self.db_config = base_config['db_config']\\
\\n home = Path.home()\\
\\n if platform.system() == 'Windows':\\
\\n from pathlib import WindowsPath\\
\\n output_data_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'Data'\\
\\n output_model_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'target'/self.usecase\\
\\n else:\\
\\n from pathlib import PosixPath\\
\\n output_data_dir = PosixPath(home)/'HCLT'/'AION'/'Data'\\
\\n output_model_dir = PosixPath(home)/'HCLT'/'AION'/'target'/self.usecase\\
\\n if not output_model_dir.exists():\\
\\n raise ValueError(f'Configuration file not found at {output_model_dir}')\\
\\n\\
\\n tracking_uri = 'file:///' + str(Path(output_model_dir)/'mlruns')\\
\\n registry_uri = 'sqlite:///' + str(Path(output_model_dir)/'mlruns.db')\\
\\n mlflow.set_tracking_uri(tracking_uri)\\
\\n mlflow.set_registry_uri(registry_uri)\\
\\n client = mlflow.tracking.MlflowClient(\\
\\n tracking_uri=tracking_uri,\\
\\n registry_uri=registry_uri,\\
\\n )\\
\\n self.model_version = client.get_latest_versions(self.usecase, stages=['production'] )[0].version\\
\\n model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)\\
\\n self.model = mlflow.pyfunc.load_model(model_version_uri)\\
\\n run = client.get_run(self.model.metadata.run_id)\\
\\n if run.info.artifact_uri.startswith('file:'): #remove file:///\\
\\n self.artifact_path = Path(run.info.artifact_uri[len('file:///') : ])\\
\\n else:\\
\\n self.artifact_path = Path(run.info.artifact_uri)\\
\\n with open(self.artifact_path/'deploy.json', 'r') as f:\\
\\n deployment_dict = json.load(f)\\
\\n with open(self.artifact_path/'features.txt', 'r') as f:\\
\\n self.train_features = f.readline().rstrip().split(',')\\
\\n\\
\\n self.dataLocation = base_config['dataLocation']\\
\\n self.selected_features = deployment_dict['load_data']['selected_features']\\
\\n self.target_feature = deployment_dict['load_data']['target_feature']\\
\\n self.output_model_dir = output_model_dir"
if self.missing:
text += "\\n self.missing_values = deployment_dict['transformation']['fillna']"
if self.word2num_features:
text += "\\n self.word2num_features = deployment_dict['transformation']['word2num_features']"
if self.cat_encoder == 'labelencoding':
text += "\\n self.cat_encoder = deployment_dict['transformation']['cat_encoder']"
elif (self.cat_encoder == 'targetencoding') or (self.cat_encoder == 'onehotencoding'):
text += "\\n self.cat_encoder = deployment_dict['transformation']['cat_encoder']['file']"
text += "\\n self.cat_encoder_cols = deployment_dict['transformation']['cat_encoder']['features']"
if self.target_encoder:
text += "\\n self.target_encoder = joblib.load(self.artifact_path/deployment_dict['transformation']['target_encoder'])"
if self.normalizer:
text += "\\n self.normalizer = joblib.load(self.artifact_path/deployment_dict['transformation']['normalizer']['file'])\\
\\n self.normalizer_col = deployment_dict['transformation']['normalizer']['features']"
if self.text_profiler:
text += "\\n self.text_profiler = joblib.load(self.artifact_path/deployment_dict['transformation']['Status']['text_profiler']['file'])\\
\\n self.text_profiler_col = deployment_dict['transformation']['Status']['text_profiler']['features']"
if self.feature_reducer:
text += "\\n self.feature_reducer = joblib.load(self.artifact_path/deployment_dict['featureengineering']['feature_reducer']['file'])\\
\\n self.feature_reducer_cols = deployment_dict['featureengineering']['feature_reducer']['features']"
text += """
    def read_data_from_db(self):
        if self.db_enabled:
            db = None
            try:
                db = database(self.db_config)
                query = "SELECT * FROM {} WHERE model_ver = '{}' AND {} != ''".format(db.measurement, self.model_version, self.target_feature)
                if 'read_time' in self.db_config.keys() and self.db_config['read_time']:
                    query += f" AND time > now() - {self.db_config['read_time']}"
                data = db.read_data(query)
            except Exception:
                raise ValueError('Unable to read from the database')
            finally:
                if db:
                    db.close()
            return data
        return None"""
text += "\\
\\n def predict(self, data):\\
\\n df = pd.DataFrame()\\
\\n if Path(data).exists():\\
\\n if Path(data).suffix == '.tsv':\\
\\n df=read_data(data,encoding='utf-8',sep='\\t')\\
\\n elif Path(data).suffix == '.csv':\\
\\n df=read_data(data,encoding='utf-8')\\
\\n else:\\
\\n if Path(data).suffix == '.json':\\
\\n jsonData = read_json(data)\\
\\n df = pd.json_normalize(jsonData)\\
\\n elif is_file_name_url(data):\\
\\n df = read_data(data,encoding='utf-8')\\
\\n else:\\
\\n jsonData = json.loads(data)\\
\\n df = pd.json_normalize(jsonData)\\
\\n if len(df) == 0:\\
\\n raise ValueError('No data record found')\\
\\n missing_features = [x for x in self.selected_features if x not in df.columns]\\
\\n if missing_features:\\
\\n raise ValueError(f'missing feature(s): {missing_features}')\\
\\n if self.target_feature not in df.columns:\\
\\n raise ValueError(f'Ground truth values/target column({self.target_feature}) not found in current data')\\
\\n df_copy = df.copy()\\
\\n df = df[self.selected_features]"
if self.word2num_features:
text += "\\n for feat in self.word2num_features:"
text += "\\n df[ feat ] = df[feat].apply(lambda x: s2n(x))"
if self.missing:
text += "\\n df.fillna(self.missing_values, inplace=True)"
if self.cat_encoder == 'labelencoding':
text += "\\n df.replace(self.cat_encoder, inplace=True)"
elif self.cat_encoder == 'targetencoding':
text += "\\n cat_enc = joblib.load(self.artifact_path/self.cat_encoder)"
text += "\\n df = cat_enc.transform(df)"
elif self.cat_encoder == 'onehotencoding':
text += "\\n cat_enc = joblib.load(self.artifact_path/self.cat_encoder)"
text += "\\n transformed_data = cat_enc.transform(df[self.cat_encoder_cols]).toarray()"
text += "\\n df[cat_enc.get_feature_names()] = pd.DataFrame(transformed_data, columns=cat_enc.get_feature_names())[cat_enc.get_feature_names()]"
if self.normalizer:
text += "\\n df[self.normalizer_col] = self.normalizer.transform(df[self.normalizer_col])"
if self.text_profiler:
text += "\\n text_corpus = df[self.text_profiler_col].apply(lambda row: ' '.join(row.values.astype(str)), axis=1)\\
\\n df_vect=self.text_profiler.transform(text_corpus)\\
\\n if isinstance(df_vect, np.ndarray):\\
\\n df1 = pd.DataFrame(df_vect)\\
\\n else:\\
\\n df1 = pd.DataFrame(df_vect.toarray(),columns = self.text_profiler.named_steps['vectorizer'].get_feature_names())\\
\\n df1 = df1.add_suffix('_vect')\\
\\n df = pd.concat([df, df1],axis=1)"
if self.feature_reducer:
text += "\\n df = self.feature_reducer.transform(df[self.feature_reducer_cols])"
else:
text += "\\n df = df[self.train_features]"
if self.target_encoder:
text += "\\n output = pd.DataFrame(self.model._model_impl.predict_proba(df), columns=self.target_encoder.classes_)\\
\\n df_copy['prediction'] = output.idxmax(axis=1)"
else:
text += "\\n output = self.model.predict(df).reshape(1, -1)[0].round(2)\\
\\n df_copy['prediction'] = output"
text += "\\n return df_copy"
if indent:
text = text.replace('\\n', (self.tab * indent) + '\\n')
return text
def getClassificationMatrixCode(self, indent=0):
text = "\\
\\ndef get_classification_metrics(actual_values, predicted_values):\\
\\n result = {}\\
\\n accuracy_score = sklearn.metrics.accuracy_score(actual_values, predicted_values)\\
\\n avg_precision = sklearn.metrics.precision_score(actual_values, predicted_values,\\
\\n average='macro')\\
\\n avg_recall = sklearn.metrics.recall_score(actual_values, predicted_values,\\
\\n average='macro')\\
\\n avg_f1 = sklearn.metrics.f1_score(actual_values, predicted_values,\\
\\n average='macro')\\
\\n\\
\\n result['accuracy'] = accuracy_score\\
\\n result['precision'] = avg_precision\\
\\n result['recall'] = avg_recall\\
\\n result['f1'] = avg_f1\\
\\n return result\\
\\n\\
"
if indent:
text = text.replace('\\n', (self.tab * indent) + '\\n')
return text
    def getRegressionMatrixCode(self, indent=0):
text = "\\
\\ndef get_regression_metrics(actual_values, predicted_values):\\
\\n result = {}\\
\\n\\
\\n me = np.mean(predicted_values - actual_values)\\
\\n sde = np.std(predicted_values - actual_values, ddof = 1)\\
\\n\\
\\n abs_err = np.abs(predicted_values - actual_values)\\
\\n mae = np.mean(abs_err)\\
\\n sdae = np.std(abs_err, ddof = 1)\\
\\n\\
\\n abs_perc_err = 100.*np.abs(predicted_values - actual_values) / actual_values\\
\\n mape = np.mean(abs_perc_err)\\
\\n sdape = np.std(abs_perc_err, ddof = 1)\\
\\n\\
\\n result['mean_error'] = me\\
\\n result['mean_abs_error'] = mae\\
\\n result['mean_abs_perc_error'] = mape\\
\\n result['error_std'] = sde\\
\\n result['abs_error_std'] = sdae\\
\\n result['abs_perc_error_std'] = sdape\\
\\n return result\\
\\n\\
"
if indent:
text = text.replace('\\n', (self.tab * indent) + '\\n')
return text
def addSuffixCode(self, indent=1):
text ="\\n\\
\\ndef check_drift( config):\\
\\n prediction = predict(config)\\
\\n usecase = config['modelName'] + '_' + config['modelVersion']\\
\\n train_data_path = prediction.artifact_path/(usecase+'_data.csv')\\
\\n if not train_data_path.exists():\\
\\n raise ValueError(f'Training data not found at {train_data_path}')\\
\\n curr_with_pred = prediction.read_data_from_db()\\
\\n if prediction.target_feature not in curr_with_pred.columns:\\
\\n raise ValueError('Ground truth not updated for corresponding data in database')\\
\\n train_with_pred = prediction.predict(train_data_path)\\
\\n performance = {}"
if self.problem_type == 'classification':
text += "\\n\\
\\n performance['train'] = get_classification_metrics(train_with_pred[prediction.target_feature], train_with_pred['prediction'])\\
\\n performance['current'] = get_classification_metrics(curr_with_pred[prediction.target_feature], curr_with_pred['prediction'])"
else:
text += "\\n\\
\\n performance['train'] = get_regression_metrics(train_with_pred[prediction.target_feature], train_with_pred['prediction'])\\
\\n performance['current'] = get_regression_metrics(curr_with_pred[prediction.target_feature], curr_with_pred['prediction'])"
text += "\\n return performance"
text += "\\n\\
\\nif __name__ == '__main__':\\
\\n try:\\
\\n if len(sys.argv) < 2:\\
\\n raise ValueError('config file not present')\\
\\n config = sys.argv[1]\\
\\n if Path(config).is_file() and Path(config).suffix == '.json':\\
\\n with open(config, 'r') as f:\\
\\n config = json.load(f)\\
\\n else:\\
\\n config = json.loads(config)\\
\\n output = check_drift(config)\\
\\n status = {'Status':'Success','Message':output}\\
\\n print('output_drift:'+json.dumps(status))\\
\\n except Exception as e:\\
\\n status = {'Status':'Failure','Message':str(e)}\\
\\n print('output_drift:'+json.dumps(status))"
if indent:
text = text.replace('\\n', (self.tab * indent) + '\\n')
return text
def addStatement(self, statement, indent=1):
self.codeText += '\\n' + self.tab * indent + statement
def generateCode(self):
self.codeText += self.addDatabaseClass()
self.codeText += self.addPredictClass()
if self.problem_type == 'classification':
self.codeText += self.getClassificationMatrixCode()
elif self.problem_type == 'regression':
            self.codeText += self.getRegressionMatrixCode()
else:
raise ValueError(f"Unsupported problem type: {self.problem_type}")
self.codeText += self.addSuffixCode()
def getCode(self):
return self.codeText
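# Illustrative usage sketch: emit the drift-check script for a classification
# use case. generateCode() stitches together the database, predict, metric and
# __main__ sections; the result is source text intended to be written to a
# .py file, not executed here.
if __name__ == '__main__':
    gen = output_drift(missing=True, problem_type='classification')
    gen.generateCode()
    print(gen.getCode())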
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class transformer():
def __init__(self, indent=0, tab_size=4):
self.df_name = 'df'
self.tab = ' ' * tab_size
self.codeText = ""
self.transformers = []
self.TxCols = []
self.imputers = {}
self.input_files = {}
self.output_files = {}
self.function_code = ''
self.addInputFiles({'inputData' : 'rawData.dat', 'metaData' : 'modelMetaData.json','log' : 'aion.log','transformedData' : 'transformedData.dat','normalization' : 'normalization.pkl'})
def addInputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.input_files[k] = v
    def addOutputFiles(self, files):
        if not isinstance(files, dict):
            raise TypeError(f"Required dict type got {type(files)} type")
        for k,v in files.items():
            self.output_files[k] = v
def getInputFiles(self):
text = 'IOFiles = '
        if not self.input_files:
text += '{ }'
else:
text += json.dumps(self.input_files, indent=4)
return text
def getOutputFiles(self):
text = 'output_file = '
if not self.output_files:
text += '{ }'
else:
text += json.dumps(self.output_files, indent=4)
return text
def getInputOutputFiles(self, indent=0):
text = '\\n'
text += self.getInputFiles()
if indent:
text = text.replace('\\n', self.tab * indent + '\\n')
return text
def __addValidateConfigCode(self):
text = "\\n\\
\\ndef validateConfig():\\
\\n config_file = Path(__file__).parent/'config.json'\\
\\n if not Path(config_file).exists():\\
\\n raise ValueError(f'Config file is missing: {config_file}')\\
\\n config = read_json(config_file)\\
\\n return config"
return text
def getPrefixModules(self):
modules = [
{'module':'Path', 'mod_from':'pathlib'}
,{'module':'pandas', 'mod_as':'pd'}
,{'module':'warnings'}
,{'module':'json'}
,{'module':'logging'}
,{'module':'joblib'}
,{'module':'MinMaxScaler', 'mod_from':'sklearn.preprocessing'}
]
return modules
def addPrefixCode(self, indent=1):
self.codeText += """
def transformation(config, targetPath, log):
dataLoc = targetPath / IOFiles['inputData']
    if not dataLoc.exists():
        return {'Status': 'Failure', 'Message': 'Data location does not exist.'}
df = read_data(dataLoc)
log.log_dataframe(df)
target_feature = config['target_feature']
dateTimeFeature=config['dateTimeFeature']
df.set_index(dateTimeFeature, inplace=True)
df = df.dropna()
df=df.fillna(df.mean())
if len(target_feature) == 1:
trainX = df[target_feature].to_numpy().reshape(-1,1)
else:
trainX = df[target_feature].to_numpy()
scaler = MinMaxScaler(feature_range=(0, 1))
trainX = scaler.fit_transform(trainX)
normalization_file_name = str(targetPath / IOFiles['normalization'])
joblib.dump(scaler, normalization_file_name)
df[target_feature] = trainX
log.log_dataframe(df)
csv_path = str(targetPath / IOFiles['transformedData'])
write_data(df, csv_path, index=True)
status = {'Status': 'Success', 'DataFilePath': IOFiles['transformedData'],
'target_feature': target_feature,'dateTimeFeature':dateTimeFeature,
"Normalization_file":normalization_file_name }
    # __main__ has already validated the meta data file; reload it here so the
    # function does not rely on a global
    meta_data = read_json(str(targetPath / IOFiles['metaData']))
    meta_data['transformation'] = {}
meta_data['transformation']['Status'] = status
write_json(meta_data, str(targetPath / IOFiles['metaData']))
log.info(f'Transformed data saved at {csv_path}')
log.info(f'output: {status}')
return json.dumps(status)
"""
def getMainCodeModules(self):
modules = [{'module':'Path', 'mod_from':'pathlib'}
,{'module':'sys'}
,{'module':'json'}
,{'module':'logging'}
,{'module':'argparse'}
]
return modules
def addMainCode(self, indent=1):
self.codeText += """
if __name__ == '__main__':
config = validateConfig()
targetPath = Path('aion') / config['targetPath']
if not targetPath.exists():
        raise ValueError(f'targetPath does not exist: {targetPath}')
meta_data_file = targetPath / IOFiles['metaData']
if meta_data_file.exists():
meta_data = read_json(meta_data_file)
else:
raise ValueError(f'Configuration file not found: {meta_data_file}')
log_file = targetPath / IOFiles['log']
log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)
try:
print(transformation(config, targetPath, log))
except Exception as e:
status = {'Status': 'Failure', 'Message': str(e)}
print(json.dumps(status))
"""
def addValidateConfigCode(self, indent=1):
self.function_code += self.__addValidateConfigCode()
def addLocalFunctionsCode(self):
self.addValidateConfigCode()
def addStatement(self, statement, indent=1):
self.codeText += '\\n' + self.tab * indent + statement
def getCode(self, indent=1):
return self.function_code + '\\n' + self.codeText
def getDFName(self):
return self.df_name
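# Illustrative usage sketch: assemble the time-series transformation script.
# getInputOutputFiles() renders the IOFiles mapping and getCode() returns the
# validateConfig helper followed by the transformation() and __main__ sections.
if __name__ == '__main__':
    t = transformer()
    t.addLocalFunctionsCode()
    t.addPrefixCode()
    t.addMainCode()
    print(t.getInputOutputFiles())
    print(t.getCode())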
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class register():
def __init__(self, importer, indent=0, tab_size=4):
self.tab = " "*tab_size
self.codeText = ""
self.function_code = ""
self.importer = importer
self.input_files = {}
self.output_files = {}
self.addInputFiles({'log' : 'aion.log', 'metaData' : 'modelMetaData.json','metrics': 'metrics.json','production': 'production.json'})
def addInputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.input_files[k] = v
    def addOutputFiles(self, files):
        if not isinstance(files, dict):
            raise TypeError(f"Required dict type got {type(files)} type")
        for k,v in files.items():
            self.output_files[k] = v
def getInputFiles(self):
text = 'IOFiles = '
if not self.input_files:
text += '{ }'
else:
text += json.dumps(self.input_files, indent=4)
return text
def getOutputFiles(self):
text = 'output_file = '
if not self.output_files:
text += '{ }'
else:
text += json.dumps(self.output_files, indent=4)
return text
def getInputOutputFiles(self, indent=0):
text = '\\n'
text += self.getInputFiles()
if indent:
text = text.replace('\\n', self.tab * indent + '\\n')
return text
def __addValidateConfigCode(self, models=None):
text = "\\n\\
\\ndef validateConfig():\\
\\n config_file = Path(__file__).parent/'config.json'\\
\\n if not Path(config_file).exists():\\
\\n raise ValueError(f'Config file is missing: {config_file}')\\
\\n config = utils.read_json(config_file)\\
\\n return config\\
"
return text
def addLocalFunctionsCode(self, models):
self.function_code += self.__addValidateConfigCode(models)
def addPrefixCode(self, smaller_is_better=False, indent=1):
compare = 'min' if smaller_is_better else 'max'
self.codeText += f"""
def get_best_model(run_path):
    models_path = [d for d in run_path.iterdir() if d.is_dir()]
scores = {{}}
for model in models_path:
metrics = utils.read_json(model/IOFiles['metrics'])
if metrics.get('score', None):
scores[model.stem] = metrics['score']
best_model = {compare}(scores, key=scores.get)
return best_model
def __merge_logs(log_file_sequence,path, files):
if log_file_sequence['first'] in files:
with open(path/log_file_sequence['first'], 'r') as f:
main_log = f.read()
files.remove(log_file_sequence['first'])
for file in files:
with open(path/file, 'r') as f:
main_log = main_log + f.read()
(path/file).unlink()
with open(path/log_file_sequence['merged'], 'w') as f:
f.write(main_log)
def merge_log_files(folder, models):
log_file_sequence = {{
'first': 'aion.log',
'merged': 'aion.log'
}}
log_file_suffix = '_aion.log'
log_files = [x+log_file_suffix for x in models if (folder/(x+log_file_suffix)).exists()]
log_files.append(log_file_sequence['first'])
__merge_logs(log_file_sequence, folder, log_files)
def register(config, targetPath, log):
meta_data_file = targetPath / IOFiles['metaData']
if meta_data_file.exists():
meta_data = utils.read_json(meta_data_file)
else:
raise ValueError(f'Configuration file not found: {{meta_data_file}}')
run_id = meta_data['monitoring']['runId']
usecase = config['targetPath']
current_run_path = targetPath/'runs'/str(run_id)
register_model_name = get_best_model(current_run_path)
models = config['models']
merge_log_files(targetPath, models)
meta_data['register'] = {{'runId':run_id, 'model': register_model_name}}
utils.write_json(meta_data, targetPath/IOFiles['metaData'])
utils.write_json({{'Model':register_model_name,'runNo':str(run_id)}}, targetPath/IOFiles['production'])
status = {{'Status':'Success','Message':f'Model Registered: {{register_model_name}}'}}
log.info(f'output: {{status}}')
return json.dumps(status)
"""
def getMainCodeModules(self):
modules = [{'module':'Path', 'mod_from':'pathlib'}
,{'module':'json'}
]
return modules
def addMainCode(self, models, indent=1):
self.codeText += """
if __name__ == '__main__':
config = validateConfig()
targetPath = Path('aion') / config['targetPath']
if not targetPath.exists():
        raise ValueError(f'targetPath does not exist: {targetPath}')
log_file = targetPath / IOFiles['log']
log = utils.logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)
try:
print(register(config, targetPath, log))
except Exception as e:
status = {'Status': 'Failure', 'Message': str(e)}
print(json.dumps(status))
"""
def addStatement(self, statement, indent=1):
self.codeText += f"\\n{self.tab * indent}{statement}"
def getCode(self, indent=1):
return self.function_code + '\\n' + self.codeText
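# Illustrative usage sketch (assumption: importModule comes from the sibling
# imports module, as in utility.py below). The generated register step picks
# the best run by score, merges the per-model logs and records the production
# model in production.json:
#
#   importer = importModule()
#   reg = register(importer)
#   reg.addLocalFunctionsCode(models=['lstm'])
#   reg.addPrefixCode(smaller_is_better=False)
#   reg.addMainCode(models=['lstm'])
#   print(reg.getCode())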
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from .imports import importModule
utility_functions = {
'load_data': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'],
'transformer': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'],
'selector': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'],
'train': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'],
'register': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'],
'Prediction': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'],
'drift': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'],
}
#TODO convert read and write functions in to class functions
functions_code = {
'read_json':{'imports':[{'mod':'json'}],'code':"\\n\\
\\ndef read_json(file_path):\\
\\n data = None\\
\\n with open(file_path,'r') as f:\\
\\n data = json.load(f)\\
\\n return data\\
\\n"},
'write_json':{'imports':[{'mod':'json'}],'code':"\\n\\
\\ndef write_json(data, file_path):\\
\\n with open(file_path,'w') as f:\\
\\n json.dump(data, f)\\
\\n"},
'read_data':{'imports':[{'mod':'pandas','mod_as':'pd'}],'code':"\\n\\
\\ndef read_data(file_path, encoding='utf-8', sep=','):\\
\\n return pd.read_csv(file_path, encoding=encoding, sep=sep)\\
\\n"},
'write_data':{'imports':[{'mod':'pandas','mod_as':'pd'}],'code':"\\n\\
\\ndef write_data(data, file_path, index=False):\\
\\n return data.to_csv(file_path, index=index)\\
\\n\\
\\n#Uncomment and change below code for google storage\\
\\n#from google.cloud import storage\\
\\n#def write_data(data, file_path, index=False):\\
\\n# file_name= file_path.name\\
\\n# data.to_csv('output_data.csv')\\
\\n# storage_client = storage.Client()\\
\\n# bucket = storage_client.bucket('aion_data')\\
\\n# bucket.blob('prediction/'+file_name).upload_from_filename('output_data.csv', content_type='text/csv')\\
\\n# return data\\
\\n"},
'is_file_name_url':{'imports':[],'code':"\\n\\
\\ndef is_file_name_url(file_name):\\
\\n supported_urls_starts_with = ('gs://','https://','http://')\\
\\n return file_name.startswith(supported_urls_starts_with)\\
\\n"},
'logger_class':{'imports':[{'mod':'logging'}, {'mod':'io'}],'code':"\\n\\
\\nclass logger():\\
\\n #setup the logger\\
\\n def __init__(self, log_file, mode='w', logger_name=None):\\
\\n logging.basicConfig(filename=log_file, filemode=mode, format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S')\\
\\n self.log = logging.getLogger(logger_name)\\
\\n\\
\\n #get logger\\
\\n def getLogger(self):\\
\\n return self.log\\
\\n\\
\\n def info(self, msg):\\
\\n self.log.info(msg)\\
\\n\\
\\n def error(self, msg, exc_info=False):\\
\\n self.log.error(msg,exc_info)\\
\\n\\
\\n # format and log dataframe\\
\\n def log_dataframe(self, df, rows=2, msg=None):\\
\\n buffer = io.StringIO()\\
\\n df.info(buf=buffer)\\
\\n log_text = 'Data frame{}'.format(' after ' + msg + ':' if msg else ':')\\
\\n log_text += '\\\\n\\\\t'+str(df.head(rows)).replace('\\\\n','\\\\n\\\\t')\\
\\n log_text += ('\\\\n\\\\t' + buffer.getvalue().replace('\\\\n','\\\\n\\\\t'))\\
\\n self.log.info(log_text)\\
\\n"},
}
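# For reference, functions_code['read_json']['code'] above expands to
# (roughly):
#
#     def read_json(file_path):
#         data = None
#         with open(file_path,'r') as f:
#             data = json.load(f)
#         return data
#
# utility_function below stitches such snippets together and registers their
# imports with importModule.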
class utility_function():
def __init__(self, module):
if module in utility_functions.keys():
self.module_name = module
else:
self.module_name = None
self.importer = importModule()
self.codeText = ""
def get_code(self):
code = ""
if self.module_name:
functions = utility_functions[self.module_name]
for function in functions:
self.codeText += self.get_function_code(function)
code = self.importer.getCode()
code += self.codeText
return code
def get_function_code(self, name):
code = ""
if name in functions_code.keys():
code += functions_code[name]['code']
if self.importer:
if 'imports' in functions_code[name].keys():
for module in functions_code[name]['imports']:
mod_name = module['mod']
mod_from = module.get('mod_from', None)
mod_as = module.get('mod_as', None)
self.importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as)
return code
def get_importer(self):
return self.importer
if __name__ == '__main__':
obj = utility_function('load_data')
    p = obj.get_code()
print(p)<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from mlac.timeseries.core.imports import importModule
from mlac.timeseries.core.load_data import tabularDataReader
from mlac.timeseries.core.transformer import transformer as profiler
from mlac.timeseries.core.selector import selector
from mlac.timeseries.core.trainer import learner
from mlac.timeseries.core.register import register
from mlac.timeseries.core.deploy import deploy
from mlac.timeseries.core.drift_analysis import drift
from mlac.timeseries.core.functions import global_function
from mlac.timeseries.core.data_reader import data_reader
from mlac.timeseries.core.utility import utility_function
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class deploy():
def __init__(self, tab_size=4):
self.tab = ' ' * tab_size
self.codeText = ""
self.input_files = {}
self.output_files = {}
self.addInputFiles({'metaData' : 'modelMetaData.json','log':'predict.log'})
def addInputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.input_files[k] = v
    def addOutputFiles(self, files):
        if not isinstance(files, dict):
            raise TypeError(f"Required dict type got {type(files)} type")
        for k,v in files.items():
            self.output_files[k] = v
def getInputFiles(self):
text = 'IOFiles = '
if not self.input_files:
text += '{ }'
else:
text += json.dumps(self.input_files, indent=4)
return text
def getOutputFiles(self):
text = 'output_file = '
if not self.output_files:
text += '{ }'
else:
text += json.dumps(self.output_files, indent=4)
return text
def getInputOutputFiles(self, indent=0):
text = '\\n'
text += self.getInputFiles()
text += '\\n'
text += self.getOutputFiles()
if indent:
text = text.replace('\\n', self.tab * indent + '\\n')
return text
def addStatement(self, statement, indent=1):
pass
def getPredictionCodeModules(self):
modules = [{'module':'json'}
,{'module':'joblib'}
,{'module':'pandas', 'mod_as':'pd'}
,{'module':'numpy', 'mod_as':'np'}
,{'module':'Path', 'mod_from':'pathlib'}
,{'module':'json_normalize', 'mod_from':'pandas'}
,{'module':'load_model', 'mod_from':'tensorflow.keras.models'}
]
return modules
def addPredictionCode(self):
self.codeText += """
class deploy():
def __init__(self, base_config, log=None):
self.targetPath = (Path('aion') / base_config['targetPath']).resolve()
if log:
self.logger = log
else:
log_file = self.targetPath / IOFiles['log']
self.logger = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)
try:
self.initialize(base_config)
except Exception as e:
self.logger.error(e, exc_info=True)
def initialize(self, base_config):
targetPath = Path('aion') / base_config['targetPath']
        meta_data_file = targetPath / IOFiles['metaData']
        if not meta_data_file.exists():
            raise ValueError(f'Configuration file not found: {meta_data_file}')
        meta_data = utils.read_json(meta_data_file)
        self.dateTimeFeature = meta_data['training']['dateTimeFeature']
        self.targetFeature = meta_data['training']['target_feature']
        normalization_file = meta_data['transformation']['Status']['Normalization_file']
        self.normalizer = joblib.load(normalization_file)
        self.lag_order = base_config['lag_order']
        self.noofforecasts = base_config['noofforecasts']
        run_id = str(meta_data['register']['runId'])
        model_path = str(targetPath / 'runs' / run_id / meta_data['register']['model'] / 'model')
        self.model = load_model(model_path)
        self.model_name = meta_data['register']['model']
def predict(self, data=None):
try:
return self.__predict(data)
except Exception as e:
if self.logger:
self.logger.error(e, exc_info=True)
raise ValueError(json.dumps({'Status': 'Failure', 'Message': str(e)}))
def __predict(self, data=None):
jsonData = json.loads(data)
dataFrame = json_normalize(jsonData)
xtrain = dataFrame
if len(dataFrame) == 0:
raise ValueError('No data record found')
df_l = len(dataFrame)
pred_threshold = 0.1
max_pred_by_user = round((df_l) * pred_threshold)
# prediction for 24 steps or next 24 hours
if self.noofforecasts == -1:
self.noofforecasts = max_pred_by_user
no_of_prediction = self.noofforecasts
        if no_of_prediction > max_pred_by_user:
            no_of_prediction = max_pred_by_user
noofforecasts = no_of_prediction
# self.sfeatures.remove(self.datetimeFeature)
features = self.targetFeature
        xt = xtrain[features].values  # identical for single- and multi-feature targets
xt = xt.astype('float32')
xt = self.normalizer.transform(xt)
pred_data = xt
y_future = []
self.lag_order = int(self.lag_order)
for i in range(int(no_of_prediction)):
pdata = pred_data[-self.lag_order:]
if len(features) == 1:
pdata = pdata.reshape((1, self.lag_order))
else:
pdata = pdata.reshape((1, self.lag_order, len(features)))
            # one-step forecast; feed the prediction back into the window
            pred = self.model.predict(pdata)
            predout = self.normalizer.inverse_transform(pred)
            if len(features) > 1:
                y_future.append(predout)
                pred_data = np.append(pred_data, pred, axis=0)
            else:
                y_future.append(predout.flatten()[-1])
                pred_data = np.append(pred_data, pred)
pred = pd.DataFrame(index=range(0, len(y_future)), columns=self.targetFeature)
for i in range(0, len(y_future)):
pred.iloc[i] = y_future[i]
predictions = pred
forecast_output = predictions.to_json(orient='records')
return forecast_output
"""
def getCode(self):
return self.codeText
def getServiceCode(self):
return """
from http.server import BaseHTTPRequestHandler,HTTPServer
from socketserver import ThreadingMixIn
import os
from os.path import expanduser
import platform
import threading
import subprocess
import argparse
import re
import cgi
import json
import shutil
import logging
import sys
import time
import seaborn as sns
from pathlib import Path
from predict import deploy
import pandas as pd
import scipy.stats as st
import numpy as np
import warnings
from utility import *
warnings.filterwarnings("ignore")
config_input = None
IOFiles = {
"inputData": "rawData.dat",
"metaData": "modelMetaData.json",
"production": "production.json",
"log": "aion.log",
"monitoring":"monitoring.json",
"prodData": "prodData",
"prodDataGT":"prodDataGT"
}
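# DistributionFinder fits a set of candidate discrete (bernoulli, binom, geom,
# nbinom, poisson) or continuous (uniform ... beta) distributions to a single
# column and returns the best-fitting distribution name together with its sum
# of squared errors against the empirical pmf/histogram.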
def DistributionFinder(data):
try:
distributionName = ""
sse = 0.0
KStestStatic = 0.0
dataType = ""
if (data.dtype == "float64" or data.dtype == "float32"):
dataType = "Continuous"
elif (data.dtype == "int"):
dataType = "Discrete"
elif (data.dtype == "int64"):
dataType = "Discrete"
if (dataType == "Discrete"):
distributions = [st.bernoulli, st.binom, st.geom, st.nbinom, st.poisson]
index, counts = np.unique(data.astype(int), return_counts=True)
if (len(index) >= 2):
best_sse = np.inf
y1 = []
total = sum(counts)
mean = float(sum(index * counts)) / total
variance = float((sum(index ** 2 * counts) - total * mean ** 2)) / (total - 1)
dispersion = mean / float(variance)
theta = 1 / float(dispersion)
                r = mean * (theta / (1 - theta))
for j in counts:
y1.append(float(j) / total)
pmf1 = st.bernoulli.pmf(index, mean)
pmf2 = st.binom.pmf(index, len(index), p=mean / len(index))
pmf3 = st.geom.pmf(index, 1 / float(1 + mean))
pmf4 = st.nbinom.pmf(index, mean, r)
pmf5 = st.poisson.pmf(index, mean)
sse1 = np.sum(np.power(y1 - pmf1, 2.0))
sse2 = np.sum(np.power(y1 - pmf2, 2.0))
sse3 = np.sum(np.power(y1 - pmf3, 2.0))
sse4 = np.sum(np.power(y1 - pmf4, 2.0))
sse5 = np.sum(np.power(y1 - pmf5, 2.0))
sselist = [sse1, sse2, sse3, sse4, sse5]
best_distribution = 'NA'
for i in range(0, len(sselist)):
if best_sse > sselist[i] > 0:
best_distribution = distributions[i].name
best_sse = sselist[i]
elif (len(index) == 1):
best_distribution = "Constant Data-No Distribution"
best_sse = 0.0
distributionName = best_distribution
sse = best_sse
elif (dataType == "Continuous"):
            distributions = [st.uniform, st.expon, st.weibull_max, st.weibull_min, st.chi, st.norm, st.lognorm, st.t, st.gamma, st.beta]
best_distribution = st.norm.name
best_sse = np.inf
datamin = data.min()
datamax = data.max()
nrange = datamax - datamin
y, x = np.histogram(data.astype(float), bins='auto', density=True)
x = (x + np.roll(x, -1))[:-1] / 2.0
for distribution in distributions:
params = distribution.fit(data.astype(float))
arg = params[:-2]
loc = params[-2]
scale = params[-1]
pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
sse = np.sum(np.power(y - pdf, 2.0))
if (best_sse > sse > 0):
best_distribution = distribution.name
best_sse = sse
distributionName = best_distribution
sse = best_sse
except:
response = str(sys.exc_info()[0])
message = 'Job has Failed' + response
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
print(message)
return distributionName, sse
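# getDriftDistribution overlays the fitted distribution of each feature in the
# training data (and, when provided, the production data) and returns the plot
# as a URL-quoted base64 PNG for embedding in the metrics HTML page.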
def getDriftDistribution(feature, dataframe, newdataframe=pd.DataFrame()):
import matplotlib.pyplot as plt
import math
import io, base64, urllib
np.seterr(divide='ignore', invalid='ignore')
try:
plt.clf()
except:
pass
plt.rcParams.update({'figure.max_open_warning': 0})
sns.set(color_codes=True)
pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    if len(feature) > 4:
        numberoffeatures = len(feature)
        plt.figure(figsize=(10, numberoffeatures * 2))
    else:
        plt.figure(figsize=(10, 5))
    for idx, col in enumerate(feature):
        dataType = dataframe[col].dtypes
        if dataType not in pandasNumericDtypes:
            dataframe[col] = pd.Categorical(dataframe[col])
            dataframe[col] = dataframe[col].cat.codes
            dataframe[col] = dataframe[col].astype(int)
            dataframe[col] = dataframe[col].fillna(dataframe[col].mode()[0])
        else:
            dataframe[col] = dataframe[col].fillna(dataframe[col].mean())
        plt.subplots_adjust(hspace=0.5, wspace=0.7, top=1)
        plt.subplot(math.ceil(len(feature) / 2), 2, idx + 1)
        distname, sse = DistributionFinder(dataframe[col])
        print(distname)
        ax = sns.distplot(dataframe[col], label=distname)
        ax.legend(loc='best')
        if not newdataframe.empty:
            dataType = newdataframe[col].dtypes
            if dataType not in pandasNumericDtypes:
                newdataframe[col] = pd.Categorical(newdataframe[col])
                newdataframe[col] = newdataframe[col].cat.codes
                newdataframe[col] = newdataframe[col].astype(int)
                newdataframe[col] = newdataframe[col].fillna(newdataframe[col].mode()[0])
            else:
                newdataframe[col] = newdataframe[col].fillna(newdataframe[col].mean())
            distname, sse = DistributionFinder(newdataframe[col])
            print(distname)
            ax = sns.distplot(newdataframe[col], label=distname)
            ax.legend(loc='best')
buf = io.BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
string = base64.b64encode(buf.read())
uri = urllib.parse.quote(string)
return uri
def read_json(file_path):
data = None
with open(file_path,'r') as f:
data = json.load(f)
return data
class HTTPRequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
print('PYTHON ######## REQUEST ####### STARTED')
        if re.search('/AION/', self.path) or re.search('/aion/', self.path):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
if ctype == 'application/json':
length = int(self.headers.get('content-length'))
data = self.rfile.read(length)
usecase = self.path.split('/')[-2]
if usecase.lower() == config_input['targetPath'].lower():
operation = self.path.split('/')[-1]
data = json.loads(data)
dataStr = json.dumps(data)
if operation.lower() == 'predict':
output=deployobj.predict(dataStr)
resp = output
elif operation.lower() == 'groundtruth':
gtObj = groundtruth(config_input)
output = gtObj.actual(dataStr)
resp = output
elif operation.lower() == 'delete':
targetPath = Path('aion')/config_input['targetPath']
for file in data:
x = targetPath/file
if x.exists():
os.remove(x)
resp = json.dumps({'Status':'Success'})
else:
outputStr = json.dumps({'Status':'Error','Msg':'Operation not supported'})
resp = outputStr
else:
outputStr = json.dumps({'Status':'Error','Msg':'Wrong URL'})
resp = outputStr
else:
outputStr = json.dumps({'Status':'ERROR','Msg':'Content-Type Not Present'})
resp = outputStr
resp=resp+'\\\\n'
resp=resp.encode()
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(resp)
else:
print('python ==> else1')
self.send_response(403)
self.send_header('Content-Type', 'application/json')
self.end_headers()
print('PYTHON ######## REQUEST ####### ENDED')
return
def do_GET(self):
print('PYTHON ######## REQUEST ####### STARTED')
        if re.search('/AION/', self.path) or re.search('/aion/', self.path):
usecase = self.path.split('/')[-2]
self.send_response(200)
self.targetPath = Path('aion')/config_input['targetPath']
meta_data_file = self.targetPath/IOFiles['metaData']
if meta_data_file.exists():
meta_data = read_json(meta_data_file)
else:
raise ValueError(f'Configuration file not found: {meta_data_file}')
production_file = self.targetPath/IOFiles['production']
if production_file.exists():
production_data = read_json(production_file)
else:
raise ValueError(f'Production Details not found: {production_file}')
operation = self.path.split('/')[-1]
if (usecase.lower() == config_input['targetPath'].lower()) and (operation.lower() == 'metrices'):
self.send_header('Content-Type', 'text/html')
self.end_headers()
ModelString = production_data['Model']
ModelPerformance = ModelString+'_performance.json'
performance_file = self.targetPath/ModelPerformance
if performance_file.exists():
performance_data = read_json(performance_file)
else:
                        raise ValueError(f'Performance details not found: {performance_file}')
Scoring_Creteria = performance_data['scoring_criteria']
train_score = round(performance_data['metrices']['train_score'],2)
test_score = round(performance_data['metrices']['test_score'],2)
current_score = 'NA'
monitoring = read_json(self.targetPath/IOFiles['monitoring'])
reader = dataReader(reader_type=monitoring['prod_db_type'],target_path=self.targetPath, config=monitoring['db_config'])
inputDatafile = self.targetPath/IOFiles['inputData']
NoOfPrediction = 0
NoOfGroundTruth = 0
inputdistribution = ''
if reader.file_exists(IOFiles['prodData']):
dfPredict = reader.read(IOFiles['prodData'])
dfinput = pd.read_csv(inputDatafile)
features = meta_data['training']['features']
inputdistribution = getDriftDistribution(features,dfinput,dfPredict)
NoOfPrediction = len(dfPredict)
if reader.file_exists(IOFiles['prodDataGT']):
dfGroundTruth = reader.read(IOFiles['prodDataGT'])
NoOfGroundTruth = len(dfGroundTruth)
common_col = [k for k in dfPredict.columns.tolist() if k in dfGroundTruth.columns.tolist()]
proddataDF = pd.merge(dfPredict, dfGroundTruth, on =common_col,how = 'inner')
if Scoring_Creteria.lower() == 'accuracy':
from sklearn.metrics import accuracy_score
current_score = accuracy_score(proddataDF[config_input['target_feature']], proddataDF['prediction'])
current_score = round((current_score*100),2)
elif Scoring_Creteria.lower() == 'recall':
                            from sklearn.metrics import recall_score
current_score = recall_score(proddataDF[config_input['target_feature']], proddataDF['prediction'],average='macro')
current_score = round((current_score*100),2)
msg = \\"""<html>
<head>
<title>Performance Details</title>
</head>
<style>
table, th, td {border}
</style>
<body>
<h2><b>Deployed Model:</b>{ModelString}</h2>
<br/>
<table style="width:50%">
<tr>
<td>No of Prediction</td>
<td>{NoOfPrediction}</td>
</tr>
<tr>
<td>No of GroundTruth</td>
<td>{NoOfGroundTruth}</td>
</tr>
</table>
<br/>
<table style="width:100%">
<tr>
<th>Score Type</th>
<th>Train Score</th>
<th>Test Score</th>
<th>Production Score</th>
</tr>
<tr>
<td>{Scoring_Creteria}</td>
<td>{train_score}</td>
<td>{test_score}</td>
<td>{current_score}</td>
</tr>
</table>
<br/>
<br/>
<img src="data:image/png;base64,{newDataDrift}" alt="" >
</body>
</html>
\\""".format(border='{border: 1px solid black;}',ModelString=ModelString,Scoring_Creteria=Scoring_Creteria,NoOfPrediction=NoOfPrediction,NoOfGroundTruth=NoOfGroundTruth,train_score=train_score,test_score=test_score,current_score=current_score,newDataDrift=inputdistribution)
elif (usecase.lower() == config_input['targetPath'].lower()) and (operation.lower() == 'logs'):
self.send_header('Content-Type', 'text/plain')
self.end_headers()
log_file = self.targetPath/IOFiles['log']
if log_file.exists():
with open(log_file) as f:
msg = f.read()
else:
raise ValueError(f'Log Details not found: {log_file}')
else:
self.send_header('Content-Type', 'application/json')
self.end_headers()
features = meta_data['load_data']['selected_features']
bodydes='['
for x in features:
if bodydes != '[':
bodydes = bodydes+','
bodydes = bodydes+'{"'+x+'":"value"}'
bodydes+=']'
urltext = '/AION/'+config_input['targetPath']+'/predict'
urltextgth='/AION/'+config_input['targetPath']+'/groundtruth'
urltextproduction='/AION/'+config_input['targetPath']+'/metrices'
msg=\\"""
Version:{modelversion}
RunNo: {runNo}
URL for Prediction
==================
URL:{url}
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
Output: prediction, probability (if applicable), remarks corresponding to each row.
URL for GroundTruth
===================
URL:{urltextgth}
RequestType: POST
Content-Type=application/json
Note: Make sure one feature (an ID) is unique in both predict and groundtruth requests. Otherwise output drift will not work
URL for Model In Production Analysis
====================================
URL:{urltextproduction}
RequestType: GET
Content-Type=application/json
\\""".format(modelversion=config_input['modelVersion'],runNo=config_input['deployedRunNo'],url=urltext,urltextgth=urltextgth,urltextproduction=urltextproduction,displaymsg=bodydes)
self.wfile.write(msg.encode())
else:
self.send_response(403)
self.send_header('Content-Type', 'application/json')
self.end_headers()
return
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
allow_reuse_address = True
def shutdown(self):
self.socket.close()
HTTPServer.shutdown(self)
class file_status():
def __init__(self, reload_function, params, file, logger):
self.files_status = {}
self.initializeFileStatus(file)
self.reload_function = reload_function
self.params = params
self.logger = logger
def initializeFileStatus(self, file):
self.files_status = {'path': file, 'time':file.stat().st_mtime}
def is_file_changed(self):
if self.files_status['path'].stat().st_mtime > self.files_status['time']:
self.files_status['time'] = self.files_status['path'].stat().st_mtime
return True
return False
def run(self):
global config_input
        while True:
time.sleep(30)
if self.is_file_changed():
production_details = targetPath/IOFiles['production']
if not production_details.exists():
                    raise ValueError(f'Model in production details does not exist: {production_details}')
productionmodel = read_json(production_details)
config_file = Path(__file__).parent/'config.json'
if not Path(config_file).exists():
raise ValueError(f'Config file is missing: {config_file}')
config_input = read_json(config_file)
config_input['deployedModel'] = productionmodel['Model']
config_input['deployedRunNo'] = productionmodel['runNo']
self.logger.info('Model changed Reloading.....')
self.logger.info(f'Model: {config_input["deployedModel"]}')
self.logger.info(f'Version: {str(config_input["modelVersion"])}')
self.logger.info(f'runNo: {str(config_input["deployedRunNo"])}')
self.reload_function(config_input)
class SimpleHttpServer():
def __init__(self, ip, port, model_file_path,reload_function,params, logger):
self.server = ThreadedHTTPServer((ip,port), HTTPRequestHandler)
self.status_checker = file_status( reload_function, params, model_file_path, logger)
def start(self):
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
self.status_thread = threading.Thread(target=self.status_checker.run)
self.status_thread.start()
def waitForThread(self):
self.server_thread.join()
self.status_thread.join()
def stop(self):
self.server.shutdown()
self.waitForThread()
if __name__=='__main__':
parser = argparse.ArgumentParser(description='HTTP Server')
parser.add_argument('-ip','--ipAddress', help='HTTP Server IP')
parser.add_argument('-pn','--portNo', type=int, help='Listening port for HTTP Server')
args = parser.parse_args()
config_file = Path(__file__).parent/'config.json'
if not Path(config_file).exists():
raise ValueError(f'Config file is missing: {config_file}')
config = read_json(config_file)
if args.ipAddress:
config['ipAddress'] = args.ipAddress
if args.portNo:
config['portNo'] = args.portNo
targetPath = Path('aion')/config['targetPath']
if not targetPath.exists():
        raise ValueError(f'targetPath does not exist: {targetPath}')
production_details = targetPath/IOFiles['production']
if not production_details.exists():
        raise ValueError(f'Model in production details does not exist: {production_details}')
productionmodel = read_json(production_details)
config['deployedModel'] = productionmodel['Model']
config['deployedRunNo'] = productionmodel['runNo']
#server = SimpleHttpServer(config['ipAddress'],int(config['portNo']))
config_input = config
logging.basicConfig(filename= Path(targetPath)/IOFiles['log'], filemode='a', format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S')
logger = logging.getLogger(Path(__file__).parent.name)
deployobj = deploy(config_input, logger)
server = SimpleHttpServer(config['ipAddress'],int(config['portNo']),targetPath/IOFiles['production'],deployobj.initialize,config_input, logger)
logger.info('HTTP Server Running...........')
logger.info(f"IP Address: {config['ipAddress']}")
logger.info(f"Port No.: {config['portNo']}")
print('HTTP Server Running...........')
print('For Prediction')
print('================')
print('Request Type: Post')
print('Content-Type: application/json')
print('URL: /AION/'+config['targetPath']+'/predict')
print('\\\\nFor GroundTruth')
print('================')
print('Request Type: Post')
print('Content-Type: application/json')
print('URL: /AION/'+config['targetPath']+'/groundtruth')
print('\\\\nFor Help')
print('================')
print('Request Type: Get')
print('Content-Type: application/json')
print('URL: /AION/'+config['targetPath']+'/help')
print('\\\\nFor Model In Production Analysis')
print('================')
print('Request Type: Get')
print('Content-Type: application/json')
print('URL: /AION/'+config['targetPath']+'/metrices')
server.start()
server.waitForThread()
"""<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
class global_function():
def __init__(self, tab_size=4):
self.tab = ' ' * tab_size
self.codeText = ""
self.available_functions = {
'iqr':{'name':'iqrOutlier','code':f"\\n\\ndef iqrOutlier(df):\\
\\n{self.tab}Q1 = df.quantile(0.25)\\
\\n{self.tab}Q3 = df.quantile(0.75)\\
\\n{self.tab}IQR = Q3 - Q1\\
\\n{self.tab}index = ~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR))).any(axis=1)\\
\\n{self.tab}return index"},
'zscore':{'name':'zscoreOutlier','imports':[{'mod':'stats','mod_from':'scipy'},{'mod':'numpy'}],'code':f"\\n\\ndef zscoreOutlier(df):\\
\\n{self.tab}z = numpy.abs(stats.zscore(df))\\
\\n{self.tab}index = (z < 3).all(axis=1)\\
\\n{self.tab}return index"},
'iforest':{'name':'iforestOutlier','imports':[{'mod':'IsolationForest','mod_from':'sklearn.ensemble'}],'code':f"\\n\\ndef iforestOutlier(df):\\
\\n{self.tab}from sklearn.ensemble import IsolationForest\\
\\n{self.tab}isolation_forest = IsolationForest(n_estimators=100)\\
\\n{self.tab}isolation_forest.fit(df)\\
\\n{self.tab}y_pred_train = isolation_forest.predict(df)\\
\\n{self.tab}return y_pred_train == 1"},
'minMaxImputer':{'name':'minMaxImputer','code':f"\\n\\nclass minMaxImputer(TransformerMixin):\\
\\n{self.tab}def __init__(self, strategy='max'):\\
\\n{self.tab}{self.tab}self.strategy = strategy\\
\\n{self.tab}def fit(self, X, y=None):\\
\\n{self.tab}{self.tab}self.feature_names_in_ = X.columns\\
\\n{self.tab}{self.tab}if self.strategy == 'min':\\
\\n{self.tab}{self.tab}{self.tab}self.statistics_ = X.min()\\
\\n{self.tab}{self.tab}else:\\
\\n{self.tab}{self.tab}{self.tab}self.statistics_ = X.max()\\
\\n{self.tab}{self.tab}return self\\
\\n{self.tab}def transform(self, X):\\
\\n{self.tab}{self.tab}import numpy\\
\\n{self.tab}{self.tab}return numpy.where(X.isna(), self.statistics_, X)"},
'DummyEstimator':{'name':'DummyEstimator','code':f"\\n\\nclass DummyEstimator(BaseEstimator):\\
\\n{self.tab}def fit(self): pass\\
\\n{self.tab}def score(self): pass"},
'start_reducer':{'name':'start_reducer','code':"\\n\\
\\ndef start_reducer(df,target_feature,corr_threshold=0.85,var_threshold=0.05):\\
\\n import numpy as np\\
\\n import pandas as pd\\
\\n import itertools\\
\\n from sklearn.feature_selection import VarianceThreshold\\
\\n\\
\\n train_features = df.columns.tolist()\\
\\n train_features.remove(target_feature)\\
\\n df = df.loc[:, (df != df.iloc[0]).any()] #remove constant feature\\
\\n numeric_features = df.select_dtypes(include='number').columns.tolist()\\
\\n non_numeric_features = df.select_dtypes(exclude='number').columns.tolist()\\
\\n if numeric_features and var_threshold:\\
\\n qconstantFilter = VarianceThreshold(threshold=var_threshold)\\
\\n tempDf=df[numeric_features]\\
\\n qconstantFilter.fit(tempDf)\\
\\n numeric_features = [x for x,y in zip(numeric_features,qconstantFilter.get_support()) if y]\\
\\n if numeric_features:\\
\\n numColPairs = list(itertools.product(numeric_features, numeric_features))\\
\\n for item in numColPairs:\\
\\n if(item[0] == item[1]):\\
\\n numColPairs.remove(item)\\
\\n tempArray = []\\
\\n for item in numColPairs:\\
\\n tempCorr = np.abs(df[item[0]].corr(df[item[1]]))\\
\\n if(tempCorr > corr_threshold):\\
\\n tempArray.append(item[0])\\
\\n tempArray = np.unique(tempArray).tolist()\\
\\n nonsimilarNumericalCols = list(set(numeric_features) - set(tempArray))\\
\\n groupedFeatures = []\\
\\n if tempArray:\\
\\n corrDic = {}\\
\\n for feature in tempArray:\\
\\n temp = []\\
\\n for col in tempArray:\\
\\n tempCorr = np.abs(df[feature].corr(df[col]))\\
\\n temp.append(tempCorr)\\
\\n corrDic[feature] = temp\\
\\n #Similar correlation df\\
\\n corrDF = pd.DataFrame(corrDic,index = tempArray)\\
\\n corrDF.loc[:,:] = np.tril(corrDF, k=-1)\\
\\n alreadyIn = set()\\
\\n similarFeatures = []\\
\\n for col in corrDF:\\
\\n perfectCorr = corrDF[col][corrDF[col] > corr_threshold].index.tolist()\\
\\n if perfectCorr and col not in alreadyIn:\\
\\n alreadyIn.update(set(perfectCorr))\\
\\n perfectCorr.append(col)\\
\\n similarFeatures.append(perfectCorr)\\
\\n updatedSimFeatures = []\\
\\n for items in similarFeatures:\\
\\n if(target_feature != '' and target_feature in items):\\
\\n for p in items:\\
\\n updatedSimFeatures.append(p)\\
\\n else:\\
\\n updatedSimFeatures.append(items[0])\\
\\n newTempFeatures = list(set(updatedSimFeatures + nonsimilarNumericalCols))\\
\\n updatedFeatures = list(set(newTempFeatures + non_numeric_features))\\
\\n        else:\\
\\n            #no highly correlated pairs found; keep all remaining features\\
\\n            updatedFeatures = list(set(numeric_features + non_numeric_features))\\
\\n    else:\\
\\n        updatedFeatures = non_numeric_features\\
\\n return updatedFeatures"},
'feature_importance_class':{'name':'feature_importance_class','code':"\\n\\
\\ndef feature_importance_class(df, numeric_features, cat_features,target_feature,pValTh,corrTh):\\
\\n import pandas as pd\\
\\n from sklearn.feature_selection import chi2\\
\\n from sklearn.feature_selection import f_classif\\
\\n from sklearn.feature_selection import mutual_info_classif\\
\\n \\
\\n impFeatures = []\\
\\n if cat_features:\\
\\n categoricalData=df[cat_features]\\
\\n chiSqCategorical=chi2(categoricalData,df[target_feature])[1]\\
\\n corrSeries=pd.Series(chiSqCategorical, index=cat_features)\\
\\n impFeatures.append(corrSeries[corrSeries<pValTh].index.tolist())\\
\\n if numeric_features:\\
\\n quantData=df[numeric_features]\\
\\n fclassScore=f_classif(quantData,df[target_feature])[1]\\
\\n miClassScore=mutual_info_classif(quantData,df[target_feature])\\
\\n fClassSeries=pd.Series(fclassScore,index=numeric_features)\\
\\n miClassSeries=pd.Series(miClassScore,index=numeric_features)\\
\\n impFeatures.append(fClassSeries[fClassSeries<pValTh].index.tolist())\\
\\n impFeatures.append(miClassSeries[miClassSeries>corrTh].index.tolist())\\
\\n pearsonScore=df.corr() \\
\\n targetPScore=abs(pearsonScore[target_feature])\\
\\n impFeatures.append(targetPScore[targetPScore<pValTh].index.tolist())\\
\\n return list(set(sum(impFeatures, [])))"},
'feature_importance_reg':{'name':'feature_importance_reg','code':"\\n\\
\\ndef feature_importance_reg(df, numeric_features, target_feature,pValTh,corrTh):\\
\\n import pandas as pd\\
\\n from sklearn.feature_selection import f_regression\\
\\n from sklearn.feature_selection import mutual_info_regression\\
\\n \\
\\n impFeatures = []\\
\\n if numeric_features:\\
\\n quantData =df[numeric_features]\\
\\n fregScore=f_regression(quantData,df[target_feature])[1]\\
\\n miregScore=mutual_info_regression(quantData,df[target_feature])\\
\\n fregSeries=pd.Series(fregScore,index=numeric_features)\\
\\n miregSeries=pd.Series(miregScore,index=numeric_features)\\
\\n impFeatures.append(fregSeries[fregSeries<pValTh].index.tolist())\\
\\n impFeatures.append(miregSeries[miregSeries>corrTh].index.tolist())\\
\\n pearsonScore=df.corr()\\
\\n targetPScore=abs(pearsonScore[target_feature])\\
\\n impFeatures.append(targetPScore[targetPScore<pValTh].index.tolist())\\
\\n return list(set(sum(impFeatures, [])))"},
'scoring_criteria':{'name':'scoring_criteria','imports':[{'mod':'make_scorer','mod_from':'sklearn.metrics'},{'mod':'roc_auc_score','mod_from':'sklearn.metrics'}], 'code':"\\n\\
\\ndef scoring_criteria(score_param, problem_type, class_count):\\
\\n if problem_type == 'classification':\\
\\n scorer_mapping = {\\
\\n 'recall':{'binary_class': 'recall', 'multi_class': 'recall_weighted'},\\
\\n 'precision':{'binary_class': 'precision', 'multi_class': 'precision_weighted'},\\
\\n 'f1_score':{'binary_class': 'f1', 'multi_class': 'f1_weighted'},\\
\\n 'roc_auc':{'binary_class': 'roc_auc', 'multi_class': 'roc_auc_ovr_weighted'}\\
\\n }\\
\\n if (score_param.lower() == 'roc_auc') and (class_count > 2):\\
\\n score_param = make_scorer(roc_auc_score, needs_proba=True,multi_class='ovr',average='weighted')\\
\\n else:\\
\\n class_type = 'binary_class' if class_count == 2 else 'multi_class'\\
\\n if score_param in scorer_mapping.keys():\\
\\n score_param = scorer_mapping[score_param][class_type]\\
\\n else:\\
\\n score_param = 'accuracy'\\
\\n return score_param"},
'log_dataframe':{'name':'log_dataframe','code':f"\\n\\
\\ndef log_dataframe(df, msg=None):\\
\\n import io\\
\\n buffer = io.StringIO()\\
\\n df.info(buf=buffer)\\
\\n if msg:\\
\\n log_text = f'Data frame after {{msg}}:'\\
\\n else:\\
\\n log_text = 'Data frame:'\\
\\n log_text += '\\\\n\\\\t'+str(df.head(2)).replace('\\\\n','\\\\n\\\\t')\\
\\n log_text += ('\\\\n\\\\t' + buffer.getvalue().replace('\\\\n','\\\\n\\\\t'))\\
\\n get_logger().info(log_text)"},
'BayesSearchCV':{'name':'BayesSearchCV','imports':[{'mod':'cross_val_score','mod_from':'sklearn.model_selection'},{'mod':'fmin','mod_from':'hyperopt'},{'mod':'tpe','mod_from':'hyperopt'},{'mod':'hp','mod_from':'hyperopt'},{'mod':'STATUS_OK','mod_from':'hyperopt'},{'mod':'Trials','mod_from':'hyperopt'},{'mod':'numpy','mod_as':'np'}],'code':"\\n\\
\\nclass BayesSearchCV():\\
\\n\\
\\n def __init__(self, estimator, params, scoring, n_iter, cv):\\
\\n self.estimator = estimator\\
\\n self.params = params\\
\\n self.scoring = scoring\\
\\n self.iteration = n_iter\\
\\n self.cv = cv\\
\\n self.best_estimator_ = None\\
\\n self.best_score_ = None\\
\\n self.best_params_ = None\\
\\n\\
\\n def __min_fun(self, params):\\
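\\n        # hyperopt minimizes the objective, so apply the sampled params and return the negative mean CV score as the loss\\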
\\n        self.estimator.set_params(**params)\\
\\n        score=cross_val_score(self.estimator, self.X, self.y,scoring=self.scoring,cv=self.cv)\\
\\n acc = score.mean()\\
\\n return {'loss':-acc,'score': acc, 'status': STATUS_OK,'model' :self.estimator,'params': params}\\
\\n\\
\\n def fit(self, X, y):\\
\\n trials = Trials()\\
\\n self.X = X\\
\\n self.y = y\\
\\n best = fmin(self.__min_fun,self.params,algo=tpe.suggest, max_evals=self.iteration, trials=trials)\\
\\n result = sorted(trials.results, key = lambda x: x['loss'])[0]\\
\\n self.best_estimator_ = result['model']\\
\\n self.best_score_ = result['score']\\
\\n self.best_params_ = result['params']\\
\\n self.best_estimator_.fit(X, y)\\
\\n\\
\\n def hyperOptParamConversion( paramSpace):\\
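\\n        # convert string-encoded search spaces ('logspace(..)', 'linspace(..)', 'range(..)', '[..]') into hyperopt hp.uniform / hp.choice definitions\\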
\\n paramDict = {}\\
\\n for j in list(paramSpace.keys()):\\
\\n inp = paramSpace[j]\\
\\n isLog = False\\
\\n isLin = False\\
\\n isRan = False\\
\\n isList = False\\
\\n isString = False\\
\\n try:\\
\\n # check if functions are given as input and reassign paramspace\\
\\n v = paramSpace[j]\\
\\n if 'logspace' in paramSpace[j]:\\
\\n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\\
\\n isLog = True\\
\\n elif 'linspace' in paramSpace[j]:\\
\\n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\\
\\n isLin = True\\
\\n elif 'range' in paramSpace[j]:\\
\\n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\\
\\n isRan = True\\
\\n elif 'list' in paramSpace[j]:\\
\\n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\\
\\n isList = True\\
\\n                elif '[' in paramSpace[j] and ']' in paramSpace[j]:\\
\\n paramSpace[j] = v.split('[')[1].split(']')[0].replace(' ', '')\\
\\n isList = True\\
\\n x = paramSpace[j].split(',')\\
\\n except:\\
\\n x = paramSpace[j]\\
\\n str_arg = paramSpace[j]\\
\\n\\
\\n # check if arguments are string\\
\\n try:\\
\\n test = eval(x[0])\\
\\n except:\\
\\n isString = True\\
\\n\\
\\n if isString:\\
\\n paramDict.update({j: hp.choice(j, x)})\\
\\n else:\\
\\n res = eval(str_arg)\\
\\n if isLin:\\
\\n y = eval('np.linspace' + str(res))\\
\\n paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))})\\
\\n elif isLog:\\
\\n y = eval('np.logspace' + str(res))\\
\\n paramDict.update(\\
\\n {j: hp.uniform(j, 10 ** eval(x[0]), 10 ** eval(x[1]))})\\
\\n elif isRan:\\
\\n y = eval('np.arange' + str(res))\\
\\n paramDict.update({j: hp.choice(j, y)})\\
\\n # check datatype of argument\\
\\n elif isinstance(eval(x[0]), bool):\\
\\n y = list(map(lambda i: eval(i), x))\\
\\n paramDict.update({j: hp.choice(j, eval(str(y)))})\\
\\n elif isinstance(eval(x[0]), float):\\
\\n res = eval(str_arg)\\
\\n if len(str_arg.split(',')) == 3 and not isList:\\
\\n y = eval('np.linspace' + str(res))\\
\\n #print(y)\\
\\n paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))})\\
\\n else:\\
\\n y = list(res) if isinstance(res, tuple) else [res]\\
\\n paramDict.update({j: hp.choice(j, y)})\\
\\n else:\\
\\n res = eval(str_arg)\\
\\n if len(str_arg.split(',')) == 3 and not isList:\\
\\n y = eval('np.linspace' +str(res)) if eval(x[2]) >= eval(x[1]) else eval('np.arange'+str(res))\\
\\n else:\\
\\n y = list(res) if isinstance(res, tuple) else [res]\\
\\n paramDict.update({j: hp.choice(j, y)})\\
\\n return paramDict"},
's2n':{'name':'s2n','imports':[{'mod':'w2n','mod_from':'word2number'},{'mod':'numpy','mod_as':'np'}],'code':"\\n\\
\\ndef s2n(value):\\
\\n try:\\
\\n x=eval(value)\\
\\n return x\\
\\n except:\\
\\n try:\\
\\n return w2n.word_to_num(value)\\
\\n except:\\
\\n return np.nan"},
'readWrite':{'name':'readWrite','imports':[{'mod':'json'},{'mod':'pandas','mod_as':'pd'}],'code':"\\n\\
\\ndef read_json(file_path):\\
\\n data = None\\
\\n with open(file_path,'r') as f:\\
\\n data = json.load(f)\\
\\n return data\\
\\n\\
\\ndef write_json(data, file_path):\\
\\n with open(file_path,'w') as f:\\
\\n json.dump(data, f)\\
\\n\\
\\ndef read_data(file_path, encoding='utf-8', sep=','):\\
\\n return pd.read_csv(file_path, encoding=encoding, sep=sep)\\
\\n\\
\\ndef write_data(data, file_path, index=False):\\
\\n return data.to_csv(file_path, index=index)\\
\\n\\
\\n#Uncomment and change below code for google storage\\
\\n#def write_data(data, file_path, index=False):\\
\\n# file_name= file_path.name\\
\\n# data.to_csv('output_data.csv')\\
\\n# storage_client = storage.Client()\\
\\n# bucket = storage_client.bucket('aion_data')\\
\\n# bucket.blob('prediction/'+file_name).upload_from_filename('output_data.csv', content_type='text/csv')\\
\\n# return data\\
\\n\\
\\ndef is_file_name_url(file_name):\\
\\n supported_urls_starts_with = ('gs://','https://','http://')\\
\\n return file_name.startswith(supported_urls_starts_with)\\
\\n"},
'logger':{'name':'set_logger','imports':[{'mod':'logging'}],'code':f"\\n\\
\\nlog = None\\
\\ndef set_logger(log_file, mode='a'):\\
\\n global log\\
\\n logging.basicConfig(filename=log_file, filemode=mode, format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S')\\
\\n log = logging.getLogger(Path(__file__).parent.name)\\
\\n return log\\
\\n\\
\\ndef get_logger():\\
\\n return log\\n"},
'mlflowSetPath':{'name':'mlflowSetPath','code':f"\\n\\ndef mlflowSetPath(path, name):\\
\\n{self.tab}db_name = str(Path(path)/'mlruns')\\
\\n{self.tab}mlflow.set_tracking_uri('file:///' + db_name)\\
\\n{self.tab}mlflow.set_experiment(str(Path(path).name))\\
\\n"},
'mlflow_create_experiment':{'name':'mlflow_create_experiment','code':f"\\n\\ndef mlflow_create_experiment(config, path, name):\\
\\n{self.tab}tracking_uri, artifact_uri, registry_uri = get_mlflow_uris(config, path)\\
\\n{self.tab}mlflow.tracking.set_tracking_uri(tracking_uri)\\
\\n{self.tab}mlflow.tracking.set_registry_uri(registry_uri)\\
\\n{self.tab}client = mlflow.tracking.MlflowClient()\\
\\n{self.tab}experiment = client.get_experiment_by_name(name)\\
\\n{self.tab}if experiment:\\
\\n{self.tab}{self.tab}experiment_id = experiment.experiment_id\\
\\n{self.tab}else:\\
\\n{self.tab}{self.tab}experiment_id = client.create_experiment(name, artifact_uri)\\
\\n{self.tab}return client, experiment_id\\
\\n"},
'get_mlflow_uris':{'name':'get_mlflow_uris','code':f"\\n\\ndef get_mlflow_uris(config, path):\\
\\n artifact_uri = None\\
\\n tracking_uri_type = config.get('tracking_uri_type',None)\\
\\n if tracking_uri_type == 'localDB':\\
\\n tracking_uri = 'sqlite:///' + str(path.resolve()/'mlruns.db')\\
\\n elif tracking_uri_type == 'server' and config.get('tracking_uri', None):\\
\\n tracking_uri = config['tracking_uri']\\
\\n if config.get('artifacts_uri', None):\\
\\n if Path(config['artifacts_uri']).exists():\\
\\n artifact_uri = 'file:' + config['artifacts_uri']\\
\\n else:\\
\\n artifact_uri = config['artifacts_uri']\\
\\n else:\\
\\n artifact_uri = 'file:' + str(path.resolve()/'mlruns')\\
\\n else:\\
\\n tracking_uri = 'file:' + str(path.resolve()/'mlruns')\\
\\n artifact_uri = None\\
\\n if config.get('registry_uri', None):\\
\\n registry_uri = config['registry_uri']\\
\\n else:\\
\\n registry_uri = 'sqlite:///' + str(path.resolve()/'registry.db')\\
\\n return tracking_uri, artifact_uri, registry_uri\\
\\n"},
'logMlflow':{'name':'logMlflow','code':f"\\n\\ndef logMlflow( params, metrices, estimator,tags={{}}, algoName=None):\\
\\n{self.tab}run_id = None\\
\\n{self.tab}for k,v in params.items():\\
\\n{self.tab}{self.tab}mlflow.log_param(k, v)\\
\\n{self.tab}for k,v in metrices.items():\\
\\n{self.tab}{self.tab}mlflow.log_metric(k, v)\\
\\n{self.tab}if algoName and 'CatBoost' in algoName:\\
\\n{self.tab}{self.tab}model_info = mlflow.catboost.log_model(estimator, 'model')\\
\\n{self.tab}else:\\
\\n{self.tab}{self.tab}model_info = mlflow.sklearn.log_model(sk_model=estimator, artifact_path='model')\\
\\n{self.tab}tags['processed'] = 'no'\\
\\n{self.tab}tags['registered'] = 'no'\\
\\n{self.tab}mlflow.set_tags(tags)\\
\\n{self.tab}if model_info:\\
\\n{self.tab}{self.tab}run_id = model_info.run_id\\
\\n{self.tab}return run_id\\
\\n"},
'classification_metrices':{'name':'classification_metrices','imports':[{'mod':'sklearn'},{'mod':'math'}],'code':"\\ndef get_classification_metrices( actual_values, predicted_values):\\
\\n result = {}\\
\\n accuracy_score = sklearn.metrics.accuracy_score(actual_values, predicted_values)\\
\\n avg_precision = sklearn.metrics.precision_score(actual_values, predicted_values,\\
\\n average='macro')\\
\\n avg_recall = sklearn.metrics.recall_score(actual_values, predicted_values,\\
\\n average='macro')\\
\\n avg_f1 = sklearn.metrics.f1_score(actual_values, predicted_values,\\
\\n average='macro')\\
\\n\\
\\n result['accuracy'] = math.floor(accuracy_score*10000)/100\\
\\n result['precision'] = math.floor(avg_precision*10000)/100\\
\\n result['recall'] = math.floor(avg_recall*10000)/100\\
\\n result['f1'] = math.floor(avg_f1*10000)/100\\
\\n return result\\
\\n"},
'regression_metrices':{'name':'regression_metrices','imports':[{'mod':'numpy', 'mod_as':'np'}],'code':"\\ndef get_regression_metrices( actual_values, predicted_values):\\
\\n result = {}\\
\\n\\
\\n me = np.mean(predicted_values - actual_values)\\
\\n sde = np.std(predicted_values - actual_values, ddof = 1)\\
\\n\\
\\n abs_err = np.abs(predicted_values - actual_values)\\
\\n mae = np.mean(abs_err)\\
\\n sdae = np.std(abs_err, ddof = 1)\\
\\n\\
\\n abs_perc_err = 100.*np.abs(predicted_values - actual_values) / actual_values\\
\\n mape = np.mean(abs_perc_err)\\
\\n sdape = np.std(abs_perc_err, ddof = 1)\\
\\n\\
\\n result['mean_error'] = me\\
\\n result['mean_abs_error'] = mae\\
\\n result['mean_abs_perc_error'] = mape\\
\\n result['error_std'] = sde\\
\\n result['abs_error_std'] = sdae\\
\\n result['abs_perc_error_std'] = sdape\\
\\n return result\\
\\n"}
}
def add_function(self, name, importer=None):
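        # append the snippet's source to the generated code and, when an importer is supplied, register the snippet's required imports with it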
if name in self.available_functions.keys():
self.codeText += self.available_functions[name]['code']
if importer:
if 'imports' in self.available_functions[name].keys():
for module in self.available_functions[name]['imports']:
mod_name = module['mod']
mod_from = module.get('mod_from', None)
mod_as = module.get('mod_as', None)
importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as)
def get_function_name(self, name):
if name in self.available_functions.keys():
return self.available_functions[name]['name']
return None
def getCode(self):
return self.codeText
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from importlib.metadata import version
import sys
class importModule():
def __init__(self):
self.importModule = {}
self.stdlibModule = []
self.localModule = {}
def addLocalModule(self,module, mod_from=None, mod_as=None):
if module == '*':
if module not in self.localModule.keys():
self.localModule[module]= [mod_from]
else:
self.localModule[module].append(mod_from)
elif module not in self.localModule.keys():
self.localModule[module] = {'from':mod_from, 'as':mod_as}
def addModule(self, module, mod_from=None, mod_as=None):
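        # record an import; stdlib modules are tracked separately, and re-imports of the same module under a different 'from'/'as' are kept as a list of aliases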
if module not in self.importModule.keys():
self.importModule[module] = {'from':mod_from, 'as':mod_as}
if module in sys.stdlib_module_names:
self.stdlibModule.append(module)
elif isinstance(self.importModule[module], list):
if mod_as not in [x['as'] for x in self.importModule[module]]:
self.importModule[module].append({'from':mod_from, 'as':mod_as})
            elif mod_from not in [x['from'] for x in self.importModule[module]]:
self.importModule[module].append({'from':mod_from, 'as':mod_as})
elif mod_as != self.importModule[module]['as']:
as_list = [self.importModule[module]]
as_list.append({'from':mod_from, 'as':mod_as})
self.importModule[module] = as_list
elif mod_from != self.importModule[module]['from']:
as_list = [self.importModule[module]]
as_list.append({'from':mod_from, 'as':mod_as})
self.importModule[module] = as_list
def getModules(self):
return (self.importModule, self.stdlibModule)
def getBaseModule(self, extra_importers=[]):
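        # build a requirements.txt style listing of third-party modules, mapping import names to PyPI package names via modules_alias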
modules_alias = { 'sklearn':'scikit-learn',
'genetic_selection':'sklearn-genetic',
'google': 'google-cloud-storage',
'azure':'azure-storage-file-datalake'}
local_modules = {'AIX':'/app/AIX-0.1-py3-none-any.whl'}
modules = []
require = ""
if extra_importers:
extra_importers = [importer.importModule for importer in extra_importers if isinstance(importer, importModule)]
importers_module = [self.importModule] + extra_importers
for importer_module in importers_module:
for k,v in importer_module.items():
if v['from']:
mod = v['from'].split('.')[0]
else:
mod = k
if mod in modules_alias.keys():
mod = modules_alias[mod]
modules.append(mod)
modules = list(set(modules))
for mod in modules:
try:
if mod in local_modules.keys():
require += f"{local_modules[mod]}\\n"
else:
require += f"{mod}=={version(mod)}\\n"
except :
if mod not in sys.stdlib_module_names:
raise
return require
def getCode(self):
def to_string(k, v):
mod = ''
if v['from']:
mod += 'from {} '.format(v['from'])
mod += 'import {}'.format(k)
if v['as']:
mod += ' as {} '.format(v['as'])
return mod
modules = ""
local_modules = ""
std_lib_modules = ""
third_party_modules = ""
for k,v in self.importModule.items():
if k in self.stdlibModule:
std_lib_modules = std_lib_modules + '\\n' + to_string(k, v)
elif isinstance(v, dict):
third_party_modules = third_party_modules + '\\n' + to_string(k, v)
elif isinstance(v, list):
for alias in v:
third_party_modules = third_party_modules + '\\n' + to_string(k, alias)
for k,v in self.localModule.items():
if k != '*':
local_modules = local_modules + '\\n' + to_string(k, v)
else:
for mod_from in v:
local_modules = local_modules + '\\n' + f'from {mod_from} import {k}'
if std_lib_modules:
modules = modules + "\\n#Standard Library modules" + std_lib_modules
if third_party_modules:
modules = modules + "\\n\\n#Third Party modules" + third_party_modules
if local_modules:
modules = modules + "\\n\\n#local modules" + local_modules + '\\n'
return modules
def copyCode(self, importer):
self.importModule, self.stdlibModule = importer.getModules()
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
 * © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class learner():
def __init__(self, problem_type="classification", target_feature="", sample_method=None,indent=0, tab_size=4):
self.tab = " "*tab_size
self.df_name = 'df'
self.problem_type = problem_type
self.target_feature = target_feature
self.search_space = []
self.codeText = f"\\ndef train(log):"
self.input_files = {}
self.output_files = {}
self.function_code = ''
self.addInputFiles({'inputData' : 'featureEngineeredData.dat', 'metaData' : 'modelMetaData.json','monitor':'monitoring.json'})
def addInputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.input_files[k] = v
def addOutputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.input_files[k] = v
def getInputFiles(self):
text = 'IOFiles = '
if not self.input_files:
text += '{ }'
else:
text += json.dumps(self.input_files, indent=4)
return text
def getOutputFiles(self):
text = 'output_file = '
if not self.output_files:
text += '{ }'
else:
text += json.dumps(self.output_files, indent=4)
return text
def getInputOutputFiles(self, indent=0):
text = '\\n'
text += self.getInputFiles()
if indent:
text = text.replace('\\n', self.tab * indent + '\\n')
return text
def __addValidateConfigCode(self):
text = "\\n\\
\\ndef validateConfig():\\
\\n config_file = Path(__file__).parent/'config.json'\\
\\n if not Path(config_file).exists():\\
\\n raise ValueError(f'Config file is missing: {config_file}')\\
\\n config = utils.read_json(config_file)\\
\\n return config"
return text
def addStatement(self, statement, indent=1):
self.codeText += '\\n' + self.tab * indent + statement
def getCode(self):
return self.function_code + '\\n' + self.codeText
def addLocalFunctionsCode(self):
self.function_code += self.__addValidateConfigCode()
def getPrefixModules(self):
modules = [{'module':'Path', 'mod_from':'pathlib'}
,{'module':'pandas', 'mod_as':'pd'}
]
return modules
def addPrefixCode(self, indent=1):
self.codeText += "\\
"
def getSuffixModules(self):
modules = []
return modules
def addSuffixCode(self, indent=1):
self.codeText += "\\n\\
"
def getMainCodeModules(self):
modules = [{'module':'logging'}
]
return modules
def getMlpCodeModules(self):
modules = [{'module':'math'}
,{'module':'json'}
,{'module':'joblib'}
,{'module':'keras_tuner'}
,{'module':'pandas', 'mod_as':'pd'}
,{'module':'numpy', 'mod_as':'np'}
,{'module':'Path', 'mod_from':'pathlib'}
,{'module':'r2_score', 'mod_from':'sklearn.metrics'}
,{'module':'mean_squared_error', 'mod_from':'sklearn.metrics'}
,{'module':'mean_absolute_error', 'mod_from':'sklearn.metrics'}
,{'module':'Dense', 'mod_from':'tensorflow.keras.layers'}
,{'module':'Sequential', 'mod_from':'tensorflow.keras'}
,{'module':'Dropout', 'mod_from':'tensorflow.keras.layers'}
]
return modules
def addMlpCode(self):
self.codeText = """
def getdlparams(config):
for k, v in config.items():
if (k == "activation"):
activation_fn = str(v)
elif (k == "optimizer"):
optimizer = str(v)
elif (k == "loss"):
loss_fn = str(v)
elif (k == "first_layer"):
if not isinstance(k, list):
first_layer = str(v).split(',')
else:
first_layer = k
elif (k == "lag_order"):
lag_order = int(v)
elif (k == "hidden_layers"):
hidden_layers = int(v)
elif (k == "dropout"):
if not isinstance(k, list):
dropout = str(v).split(',')
else:
dropout = k
elif (k == "batch_size"):
batch_size = int(v)
elif (k == "epochs"):
epochs = int(v)
elif (k == "model_name"):
model_name = str(v)
return activation_fn, optimizer, loss_fn, first_layer, lag_order, hidden_layers, dropout, batch_size, epochs, model_name
def numpydf(dataset, look_back):
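    # build supervised pairs: each X row holds look_back consecutive values and y is the value that follows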
dataX, dataY = [], []
for i in range(len(dataset) - look_back - 1):
subset = dataset[i:(i + look_back), 0]
dataX.append(subset)
dataY.append(dataset[i + look_back, 0])
return np.array(dataX), np.array(dataY)
def startTraining(dataset,train_size,mlpConfig,filename_scaler,target_feature,scoreParam,log):
log.info('Training started')
    activation_fn, optimizer, loss_fn, first_layer, look_back, hidden_layers, dropout, batch_size, epochs, model_name = getdlparams(mlpConfig)
hp = keras_tuner.HyperParameters()
first_layer_min = round(int(first_layer[0]))
first_layer_max = round(int(first_layer[1]))
dropout_min = float(dropout[0])
dropout_max = float(dropout[1])
dataset = dataset.values
train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :]
trainX, trainY = numpydf(train, look_back)
testX, testY = numpydf(test, look_back)
# create and fit Multilayer Perceptron model
model = Sequential()
model.add(Dense(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16), input_dim=look_back, activation=activation_fn)) #BUGID 13484
model.add(Dropout(hp.Float('Dropout_rate',min_value=dropout_min,max_value=dropout_max,step=0.1))) #BUGID 13484
model.add(Dense(1, activation='sigmoid'))
model.compile(loss=loss_fn, optimizer=optimizer)
model_fit = model.fit(trainX, trainY, epochs=epochs, batch_size=batch_size, verbose=2)
# Estimate model performance
trainScore = model.evaluate(trainX, trainY, verbose=0)
testScore = model.evaluate(testX, testY, verbose=0)
# Scoring values for the model
mse_eval = testScore
rmse_eval = math.sqrt(testScore)
# generate predictions for training
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
scaler = joblib.load(filename_scaler)
trainY = scaler.inverse_transform([trainY])
trainPredict = scaler.inverse_transform(trainPredict)
## For test data
testY = scaler.inverse_transform([testY])
testPredict = scaler.inverse_transform(testPredict)
mse_mlp = mean_squared_error(testY.T, testPredict)
scores = {}
r2 = round(r2_score(testY.T, testPredict), 2)
scores['R2'] = r2
mae = round(mean_absolute_error(testY.T, testPredict), 2)
scores['MAE'] = mae
scores['MSE'] = round(mse_mlp, 2)
rmse = round(math.sqrt(mse_mlp), 2)
scores['RMSE'] = rmse
scores[scoreParam] = scores.get(scoreParam.upper(), scores['MSE'])
log.info("mlp rmse: "+str(rmse))
log.info("mlp mse: "+str(round(mse_mlp, 2)))
log.info("mlp r2: "+str(r2))
log.info("mlp mae: "+str(mae))
return model, look_back, scaler,testScore,trainScore,scores
def train(config, targetPath, log):
dataLoc = targetPath / IOFiles['inputData']
if not dataLoc.exists():
        return {'Status': 'Failure', 'Message': 'Data location does not exist.'}
status = dict()
usecase = config['targetPath']
df = utils.read_data(dataLoc)
target_feature = config['target_feature']
dateTimeFeature= config['dateTimeFeature']
df.set_index(dateTimeFeature, inplace=True)
train_size = int(len(df) * (1-config['test_ratio'])) #BugID:13217
mlpConfig = config['algorithms']['MLP']
filename = meta_data['transformation']['Status']['Normalization_file']
scoreParam = config['scoring_criteria']
log.info('Training MLP for TimeSeries')
mlp_model, look_back, scaler,testScore,trainScore, error_matrix = startTraining(df,train_size,mlpConfig,filename,target_feature,scoreParam,log)
score = error_matrix[scoreParam]
# Training model
model_path = targetPath/'runs'/str(meta_data['monitoring']['runId'])/model_name
model_file_name = str(model_path/'model')
mlp_model.save(model_file_name)
meta_data['training'] = {}
meta_data['training']['model_filename'] = model_file_name
meta_data['training']['dateTimeFeature'] = dateTimeFeature
meta_data['training']['target_feature'] = target_feature
utils.write_json(meta_data, targetPath / IOFiles['metaData'])
utils.write_json({'scoring_criteria': scoreParam, 'metrices': error_matrix,'score':error_matrix[scoreParam]}, model_path / IOFiles['metrics'])
# return status
status = {'Status': 'Success', 'errorMatrix': error_matrix, 'test_score':testScore, 'train_score': trainScore,'score':error_matrix[scoreParam]}
log.info(f'Test score: {testScore}')
log.info(f'Train score: {trainScore}')
log.info(f'output: {status}')
return json.dumps(status)
"""
def getLstmCodeModules(self):
modules = [{'module':'math'}
,{'module':'json'}
,{'module':'joblib'}
,{'module':'keras_tuner'}
,{'module':'pandas', 'mod_as':'pd'}
,{'module':'numpy', 'mod_as':'np'}
,{'module':'Path', 'mod_from':'pathlib'}
,{'module':'r2_score', 'mod_from':'sklearn.metrics'}
,{'module':'mean_squared_error', 'mod_from':'sklearn.metrics'}
,{'module':'mean_absolute_error', 'mod_from':'sklearn.metrics'}
,{'module':'Dense', 'mod_from':'tensorflow.keras.layers'}
,{'module':'Sequential', 'mod_from':'tensorflow.keras'}
,{'module':'Dropout', 'mod_from':'tensorflow.keras.layers'}
,{'module':'LSTM', 'mod_from':'tensorflow.keras.layers'}
,{'module':'TimeseriesGenerator', 'mod_from':'tensorflow.keras.preprocessing.sequence'}
,{'module':'train_test_split', 'mod_from':'sklearn.model_selection'}
]
return modules
def addLstmCode(self):
self.codeText = """
def getdlparams(config):
for k, v in config.items():
if (k == "activation"):
activation_fn = str(v)
elif (k == "optimizer"):
optimizer = str(v)
elif (k == "loss"):
loss_fn = str(v)
elif (k == "first_layer"):
if not isinstance(k, list):
first_layer = str(v).split(',')
else:
first_layer = k
elif (k == "lag_order"):
lag_order = int(v)
elif (k == "hidden_layers"):
hidden_layers = int(v)
elif (k == "dropout"):
if not isinstance(k, list):
dropout = str(v).split(',')
else:
dropout = k
elif (k == "batch_size"):
batch_size = int(v)
elif (k == "epochs"):
epochs = int(v)
return activation_fn, optimizer, loss_fn, first_layer, lag_order, hidden_layers, dropout, batch_size, epochs
def numpydf(dataset, look_back):
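    # sliding-window helper kept for parity with the MLP template; the LSTM path below uses TimeseriesGenerator instead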
dataX, dataY = [], []
for i in range(len(dataset) - look_back - 1):
subset = dataset[i:(i + look_back), 0]
dataX.append(subset)
dataY.append(dataset[i + look_back, 0])
return np.array(dataX), np.array(dataY)
def startTraining(dataset,test_size,mlpConfig,filename_scaler,target_feature,scoreParam,log):
log.info('Training started')
activation_fn, optimizer, loss_fn, first_layer, look_back,hidden_layers, dropout, batch_size, epochs= getdlparams(mlpConfig)
n_features = len(target_feature)
n_input = look_back
hp = keras_tuner.HyperParameters()
first_layer_min = round(int(first_layer[0]))
first_layer_max = round(int(first_layer[1]))
dropout_min = float(dropout[0])
dropout_max = float(dropout[1])
dataset = dataset[target_feature]
dataset_np = dataset.values
train, test = train_test_split(dataset_np, test_size=test_size, shuffle=False)
    generatorTrain = TimeseriesGenerator(train, train, length=n_input, batch_size=8)
generatorTest = TimeseriesGenerator(test, test, length=n_input, batch_size=8)
batch_0 = generatorTrain[0]
x, y = batch_0
epochs = int(epochs)
##Multivariate LSTM model
model = Sequential()
model.add(LSTM(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16), activation=activation_fn, input_shape=(n_input, n_features)))
model.add(Dropout(hp.Float('Dropout_rate',min_value=dropout_min,max_value=dropout_max,step=0.1)))
model.add(Dense(n_features))
model.compile(optimizer=optimizer, loss=loss_fn)
# model.fit(generatorTrain,epochs=epochs,batch_size=self.batch_size,shuffle=False)
model.fit_generator(generatorTrain, steps_per_epoch=1, epochs=epochs, shuffle=False, verbose=0)
# lstm_mv_testScore_mse = model.evaluate(x, y, verbose=0)
predictions = []
future_pred_len = n_input
# To get values for prediction,taking look_back steps of rows
first_batch = train[-future_pred_len:]
c_batch = first_batch.reshape((1, future_pred_len, n_features))
current_pred = None
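    # walk-forward forecasting: predict one step, then slide the window by dropping the oldest value and appending the prediction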
for i in range(len(test)):
# get pred for firstbatch
current_pred = model.predict(c_batch)[0]
predictions.append(current_pred)
# remove first val
c_batch_rmv_first = c_batch[:, 1:, :]
# update
c_batch = np.append(c_batch_rmv_first, [[current_pred]], axis=1)
## Prediction, inverse the minmax transform
scaler = joblib.load(filename_scaler)
prediction_actual = scaler.inverse_transform(predictions)
test_data_actual = scaler.inverse_transform(test)
mse = None
rmse = None
## Creating dataframe for actual,predictions
pred_cols = list()
for i in range(len(target_feature)):
pred_cols.append(target_feature[i] + '_pred')
predictions = pd.DataFrame(prediction_actual, columns=pred_cols)
actual = pd.DataFrame(test_data_actual, columns=target_feature)
actual.columns = [str(col) + '_actual' for col in dataset.columns]
df_predicted = pd.concat([actual, predictions], axis=1)
print("LSTM Multivariate prediction dataframe: \\\\n" + str(df_predicted))
# df_predicted.to_csv('mlp_prediction.csv')
from math import sqrt
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error
target = target_feature
mse_dict = {}
rmse_dict = {}
mae_dict = {}
r2_dict = {}
lstm_var = 0
for name in target:
index = dataset.columns.get_loc(name)
mse = mean_squared_error(test_data_actual[:, index], prediction_actual[:, index])
mse_dict[name] = mse
rmse = sqrt(mse)
rmse_dict[name] = rmse
lstm_var = lstm_var + rmse
print("Name of the target feature: " + str(name))
print("RMSE of the target feature: " + str(rmse))
r2 = r2_score(test_data_actual[:, index], prediction_actual[:, index])
r2_dict[name] = r2
mae = mean_absolute_error(test_data_actual[:, index], prediction_actual[:, index])
mae_dict[name] = mae
## For VAR comparison, send last target mse and rmse from above dict
lstm_var = lstm_var / len(target)
select_msekey = list(mse_dict.keys())[-1]
l_mse = list(mse_dict.values())[-1]
select_rmsekey = list(rmse_dict.keys())[-1]
l_rmse = list(rmse_dict.values())[-1]
select_r2key = list(r2_dict.keys())[-1]
l_r2 = list(r2_dict.values())[-1]
select_maekey = list(mae_dict.keys())[-1]
l_mae = list(mae_dict.values())[-1]
log.info('Selected target feature of LSTM for best model selection: ' + str(select_rmsekey))
scores = {}
scores['R2'] = l_r2
scores['MAE'] = l_mae
scores['MSE'] = l_mse
scores['RMSE'] = l_rmse
scores[scoreParam] = scores.get(scoreParam.upper(), scores['MSE'])
log.info("lstm rmse: "+str(l_rmse))
log.info("lstm mse: "+str(l_mse))
log.info("lstm r2: "+str(l_r2))
log.info("lstm mae: "+str(l_mae))
return model,look_back,scaler, scores
def train(config, targetPath, log):
dataLoc = targetPath / IOFiles['inputData']
if not dataLoc.exists():
        return {'Status': 'Failure', 'Message': 'Data location does not exist.'}
status = dict()
usecase = config['targetPath']
df = utils.read_data(dataLoc)
target_feature = config['target_feature']
dateTimeFeature= config['dateTimeFeature']
scoreParam = config['scoring_criteria']
testSize = config['test_ratio']
lstmConfig = config['algorithms']['LSTM']
filename = meta_data['transformation']['Status']['Normalization_file']
    if not isinstance(target_feature, list):
        target_feature = target_feature.split(",")
df.set_index(dateTimeFeature, inplace=True)
log.info('Training LSTM for TimeSeries')
mlp_model, look_back, scaler, error_matrix = startTraining(df,testSize,lstmConfig,filename,target_feature,scoreParam,log)
score = error_matrix[scoreParam]
log.info("LSTM Multivariant all scoring param results: "+str(error_matrix))
# Training model
model_path = targetPath/'runs'/str(meta_data['monitoring']['runId'])/model_name
model_file_name = str(model_path/'model')
mlp_model.save(model_file_name)
meta_data['training'] = {}
meta_data['training']['model_filename'] = model_file_name
meta_data['training']['dateTimeFeature'] = dateTimeFeature
meta_data['training']['target_feature'] = target_feature
utils.write_json(meta_data, targetPath / IOFiles['metaData'])
utils.write_json({'scoring_criteria': scoreParam, 'metrices': error_matrix,'score':error_matrix[scoreParam]}, model_path / IOFiles['metrics'])
# return status
status = {'Status': 'Success', 'errorMatrix': error_matrix,'score':error_matrix[scoreParam]}
log.info(f'score: {error_matrix[scoreParam]}')
log.info(f'output: {status}')
return json.dumps(status)
"""
def addMainCode(self, indent=1):
self.codeText += """
if __name__ == '__main__':
config = validateConfig()
targetPath = Path('aion') / config['targetPath']
if not targetPath.exists():
raise ValueError(f'targetPath does not exist')
meta_data_file = targetPath / IOFiles['metaData']
if meta_data_file.exists():
meta_data = utils.read_json(meta_data_file)
else:
raise ValueError(f'Configuration file not found: {meta_data_file}')
log_file = targetPath / IOFiles['log']
log = utils.logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)
try:
print(train(config, targetPath, log))
except Exception as e:
status = {'Status': 'Failure', 'Message': str(e)}
print(json.dumps(status))
"""
def add_variable(self, name, value, indent=1):
if isinstance(value, str):
self.codeText += f"\\n{self.tab * indent}{name} = '{value}'"
else:
self.codeText += f"\\n{self.tab * indent}{name} = {value}"
def addStatement(self, statement, indent=1):
self.codeText += f"\\n{self.tab * indent}{statement}"
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
class input_drift():
def __init__(self, tab_size=4):
self.tab = ' ' * tab_size
self.codeText = ''
def addInputDriftClass(self):
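        # emits an inputdrift class that loads the production model via mlflow and flags numeric features whose distribution shifted (two-sample KS test, p < 0.05)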
text = "\\
\\nclass inputdrift():\\
\\n\\
\\n def __init__(self,base_config):\\
\\n self.usecase = base_config['modelName'] + '_' + base_config['modelVersion']\\
\\n self.currentDataLocation = base_config['currentDataLocation']\\
\\n home = Path.home()\\
\\n if platform.system() == 'Windows':\\
\\n from pathlib import WindowsPath\\
\\n output_data_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'Data'\\
\\n output_model_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'target'/self.usecase\\
\\n else:\\
\\n from pathlib import PosixPath\\
\\n output_data_dir = PosixPath(home)/'HCLT'/'AION'/'Data'\\
\\n output_model_dir = PosixPath(home)/'HCLT'/'AION'/'target'/self.usecase\\
\\n if not output_model_dir.exists():\\
\\n raise ValueError(f'Configuration file not found at {output_model_dir}')\\
\\n\\
\\n tracking_uri = 'file:///' + str(Path(output_model_dir)/'mlruns')\\
\\n registry_uri = 'sqlite:///' + str(Path(output_model_dir)/'mlruns.db')\\
\\n mlflow.set_tracking_uri(tracking_uri)\\
\\n mlflow.set_registry_uri(registry_uri)\\
\\n client = mlflow.tracking.MlflowClient(\\
\\n tracking_uri=tracking_uri,\\
\\n registry_uri=registry_uri,\\
\\n )\\
\\n model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)\\
\\n model = mlflow.pyfunc.load_model(model_version_uri)\\
\\n run = client.get_run(model.metadata.run_id)\\
\\n if run.info.artifact_uri.startswith('file:'):\\
\\n artifact_path = Path(run.info.artifact_uri[len('file:///') : ])\\
\\n else:\\
\\n artifact_path = Path(run.info.artifact_uri)\\
\\n self.trainingDataPath = artifact_path/(self.usecase + '_data.csv')\\
\\n\\
\\n def get_input_drift(self,current_data, historical_data):\\
\\n curr_num_feat = current_data.select_dtypes(include='number')\\
\\n hist_num_feat = historical_data.select_dtypes(include='number')\\
\\n num_features = [feat for feat in historical_data.columns if feat in curr_num_feat]\\
\\n alert_count = 0\\
\\n data = {\\
\\n 'current':{'data':current_data},\\
\\n 'hist': {'data': historical_data}\\
\\n }\\
\\n dist_changed_columns = []\\
\\n dist_change_message = []\\
\\n for feature in num_features:\\
\\n curr_static_value = st.ks_2samp( hist_num_feat[feature], curr_num_feat[feature]).pvalue\\
\\n if (curr_static_value < 0.05):\\
\\n distribution = {}\\
\\n distribution['hist'] = self.DistributionFinder( historical_data[feature])\\
\\n distribution['curr'] = self.DistributionFinder( current_data[feature])\\
\\n if(distribution['hist']['name'] == distribution['curr']['name']):\\
\\n pass\\
\\n else:\\
\\n alert_count = alert_count + 1\\
\\n dist_changed_columns.append(feature)\\
\\n changed_column = {}\\
\\n changed_column['Feature'] = feature\\
\\n changed_column['KS_Training'] = curr_static_value\\
\\n changed_column['Training_Distribution'] = distribution['hist']['name']\\
\\n changed_column['New_Distribution'] = distribution['curr']['name']\\
\\n dist_change_message.append(changed_column)\\
\\n if alert_count:\\
\\n resultStatus = dist_change_message\\
\\n else :\\
\\n resultStatus='Model is working as expected'\\
\\n return(alert_count, resultStatus)\\
\\n\\
\\n def DistributionFinder(self,data):\\
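\\n        # fit candidate distributions (discrete for integer data, continuous for float data) and return the one with the lowest sum of squared errors\\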
\\n best_distribution =''\\
\\n best_sse =0.0\\
\\n if(data.dtype in ['int','int64']):\\
\\n distributions= {'bernoulli':{'algo':st.bernoulli},\\
\\n 'binom':{'algo':st.binom},\\
\\n 'geom':{'algo':st.geom},\\
\\n 'nbinom':{'algo':st.nbinom},\\
\\n 'poisson':{'algo':st.poisson}\\
\\n }\\
\\n index, counts = np.unique(data.astype(int),return_counts=True)\\
\\n if(len(index)>=2):\\
\\n best_sse = np.inf\\
\\n y1=[]\\
\\n total=sum(counts)\\
\\n                mean=float(sum(index*counts))/total\\
\\n                variance=float((sum(index**2*counts) -total*mean**2))/(total-1)\\
\\n dispersion=mean/float(variance)\\
\\n theta=1/float(dispersion)\\
\\n                r=mean*(float(theta)/(1-theta))\\
\\n\\
\\n for j in counts:\\
\\n y1.append(float(j)/total)\\
\\n distributions['bernoulli']['pmf'] = distributions['bernoulli']['algo'].pmf(index,mean)\\
\\n distributions['binom']['pmf'] = distributions['binom']['algo'].pmf(index,len(index),p=mean/len(index))\\
\\n distributions['geom']['pmf'] = distributions['geom']['algo'].pmf(index,1/float(1+mean))\\
\\n distributions['nbinom']['pmf'] = distributions['nbinom']['algo'].pmf(index,mean,r)\\
\\n distributions['poisson']['pmf'] = distributions['poisson']['algo'].pmf(index,mean)\\
\\n\\
\\n sselist = []\\
\\n for dist in distributions.keys():\\
\\n distributions[dist]['sess'] = np.sum(np.power(y1 - distributions[dist]['pmf'], 2.0))\\
\\n if np.isnan(distributions[dist]['sess']):\\
\\n distributions[dist]['sess'] = float('inf')\\
\\n best_dist = min(distributions, key=lambda v: distributions[v]['sess'])\\
\\n best_distribution = best_dist\\
\\n best_sse = distributions[best_dist]['sess']\\
\\n\\
\\n elif (len(index) == 1):\\
\\n best_distribution = 'Constant Data-No Distribution'\\
\\n best_sse = 0.0\\
\\n elif(data.dtype in ['float64','float32']):\\
\\n distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta]\\
\\n best_distribution = st.norm.name\\
\\n best_sse = np.inf\\
\\n nrange = data.max() - data.min()\\
\\n\\
\\n y, x = np.histogram(data.astype(float), bins='auto', density=True)\\
\\n x = (x + np.roll(x, -1))[:-1] / 2.0\\
\\n\\
\\n for distribution in distributions:\\
\\n with warnings.catch_warnings():\\
\\n warnings.filterwarnings('ignore')\\
\\n params = distribution.fit(data.astype(float))\\
\\n arg = params[:-2]\\
\\n loc = params[-2]\\
\\n scale = params[-1]\\
\\n pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)\\
\\n sse = np.sum(np.power(y - pdf, 2.0))\\
\\n if( sse < best_sse):\\
\\n best_distribution = distribution.name\\
\\n best_sse = sse\\
\\n\\
\\n return {'name':best_distribution, 'sse': best_sse}\\
\\n\\
"
return text
def addSuffixCode(self, indent=1):
text ="\\n\\
\\ndef check_drift( config):\\
\\n inputdriftObj = inputdrift(config)\\
\\n historicaldataFrame=pd.read_csv(inputdriftObj.trainingDataPath)\\
\\n currentdataFrame=pd.read_csv(inputdriftObj.currentDataLocation)\\
\\n dataalertcount,message = inputdriftObj.get_input_drift(currentdataFrame,historicaldataFrame)\\
\\n if message == 'Model is working as expected':\\
\\n output_json = {'status':'SUCCESS','data':{'Message':'Model is working as expected'}}\\
\\n else:\\
\\n output_json = {'status':'SUCCESS','data':{'Affected Columns':message}}\\
\\n return(output_json)\\
\\n\\
\\nif __name__ == '__main__':\\
\\n try:\\
\\n if len(sys.argv) < 2:\\
\\n raise ValueError('config file not present')\\
\\n config = sys.argv[1]\\
\\n if Path(config).is_file() and Path(config).suffix == '.json':\\
\\n with open(config, 'r') as f:\\
\\n config = json.load(f)\\
\\n else:\\
\\n config = json.loads(config)\\
\\n output = check_drift(config)\\
\\n status = {'Status':'Success','Message':output}\\
\\n print('input_drift:'+json.dumps(status))\\
\\n except Exception as e:\\
\\n status = {'Status':'Failure','Message':str(e)}\\
\\n print('input_drift:'+json.dumps(status))"
return text
def addStatement(self, statement, indent=1):
self.codeText += '\\n' + self.tab * indent + statement
def generateCode(self):
self.codeText += self.addInputDriftClass()
self.codeText += self.addSuffixCode()
def getCode(self):
return self.codeText
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class tabularDataReader():
def __init__(self, tab_size=4):
self.tab = ' ' * tab_size
self.function_code = ''
self.codeText = ''
self.code_generated = False
def getInputFiles(self):
IOFiles = {
"rawData": "rawData.dat",
"metaData" : "modelMetaData.json",
"log" : "aion.log",
"outputData" : "rawData.dat",
"monitoring":"monitoring.json",
"prodData": "prodData",
"prodDataGT":"prodDataGT"
}
text = 'IOFiles = '
if not IOFiles:
text += '{ }'
else:
text += json.dumps(IOFiles, indent=4)
return text
def getOutputFiles(self):
output_files = {
'metaData' : 'modelMetaData.json',
'log' : 'aion.log',
'outputData' : 'rawData.dat'
}
text = 'output_file = '
if not output_files:
text += '{ }'
else:
text += json.dumps(output_files, indent=4)
return text
def getInputOutputFiles(self, indent=0):
text = '\\n'
text += self.getInputFiles()
if indent:
text = text.replace('\\n', self.tab * indent + '\\n')
return text
def __addValidateConfigCode(self):
text = "\\n\\
\\ndef validateConfig():\\
\\n config_file = Path(__file__).parent/'config.json'\\
\\n if not Path(config_file).exists():\\
\\n raise ValueError(f'Config file is missing: {config_file}')\\
\\n config = read_json(config_file)\\
\\n if not config['targetPath']:\\
\\n raise ValueError(f'Target Path is not configured')\\
\\n return config"
return text
def addMainCode(self, indent=1):
self.codeText += """
if __name__ == '__main__':
config = validateConfig()
targetPath = Path('aion') / config['targetPath']
targetPath.mkdir(parents=True, exist_ok=True)
if not targetPath.exists():
raise ValueError(f'targetPath does not exist')
meta_data_file = targetPath / IOFiles['metaData']
if not meta_data_file.exists():
raise ValueError(f'Configuration file not found: {meta_data_file}')
log_file = targetPath / IOFiles['log']
log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)
try:
print(load_data(config, targetPath, log))
except Exception as e:
status = {'Status': 'Failure', 'Message': str(e)}
print(json.dumps(status))
"""
def addLoadDataCode(self):
self.codeText += """
#This function will read the data and save the data on persistent storage
def load_data(config, targetPath, log):
meta_data_file = targetPath / IOFiles['metaData']
meta_data = read_json(meta_data_file)
if meta_data.get('monitoring', False) and not meta_data['monitoring'].get('retrain', False):
        raise ValueError('New data is not enough to retrain the model')
df = read_data(config['dataLocation'])
status = {}
output_data_path = targetPath / IOFiles['outputData']
log.log_dataframe(df)
required_features = list(set(config['selected_features'] + config['dateTimeFeature'] + config['target_feature']))
log.info('Dataset features required: ' + ','.join(required_features))
missing_features = [x for x in required_features if x not in df.columns.tolist()]
if missing_features:
raise ValueError(f'Some feature/s is/are missing: {missing_features}')
log.info('Removing unused features: ' + ','.join(list(set(df.columns) - set(required_features))))
df = df[required_features]
log.info(f'Required features: {required_features}')
try:
log.info(f'Saving Dataset: {str(output_data_path)}')
write_data(df, output_data_path, index=False)
status = {'Status': 'Success', 'DataFilePath': IOFiles['outputData'], 'Records': len(df)}
except:
raise ValueError('Unable to create data file')
meta_data['load_data'] = {}
meta_data['load_data']['selected_features'] = [x for x in config['selected_features'] if
x != config['target_feature']]
meta_data['load_data']['Status'] = status
write_json(meta_data, meta_data_file)
output = json.dumps(status)
log.info(output)
return output
"""
def addValidateConfigCode(self, indent=1):
self.function_code += self.__addValidateConfigCode()
def addLocalFunctionsCode(self):
self.addValidateConfigCode()
def addStatement(self, statement, indent=1):
self.codeText += '\\n' + self.tab * indent + statement
def getCode(self):
return self.function_code + '\\n' + self.codeText
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class drift():
def __init__(self, indent=0, tab_size=4):
self.tab = " "*tab_size
self.codeText = ""
self.function_code = ""
self.input_files = {}
self.output_files = {}
self.addInputFiles({'log' : 'aion.log', 'metaData' : 'modelMetaData.json'})
def addInputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.input_files[k] = v
def addOutputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.input_files[k] = v
def getInputFiles(self):
text = 'IOFiles = '
if not self.input_files:
text += '{ }'
else:
text += json.dumps(self.input_files, indent=4)
return text
def getOutputFiles(self):
text = 'output_file = '
if not self.output_files:
text += '{ }'
else:
text += json.dumps(self.output_files, indent=4)
return text
def getInputOutputFiles(self, indent=0):
text = '\\n'
text += self.getInputFiles()
if indent:
text = text.replace('\\n', self.tab * indent + '\\n')
return text
def __addValidateConfigCode(self):
text = "\\n\\
\\ndef validateConfig():\\
\\n config_file = Path(__file__).parent/'config.json'\\
\\n if not Path(config_file).exists():\\
\\n raise ValueError(f'Config file is missing: {config_file}')\\
\\n config = utils.read_json(config_file)\\
\\n return config\\
"
return text
def addLocalFunctionsCode(self):
self.function_code += self.__addValidateConfigCode()
def addPrefixCode(self, smaller_is_better=False, indent=1):
self.codeText += """
def monitoring(config, targetPath, log):
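    # retrain when the pipeline has never run, the previous run failed to register a model, or at least retrain_threshold new rows arrived since the last registered run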
retrain = False
last_run_id = 0
retrain_threshold = config.get('retrainThreshold', 100)
meta_data_file = targetPath / IOFiles['metaData']
if meta_data_file.exists():
meta_data = utils.read_json(meta_data_file)
if not meta_data.get('register', None):
            log.info('Previous pipeline run did not complete properly')
retrain = True
else:
last_run_id = meta_data['register']['runId']
df = utils.read_data(config['dataLocation'])
df_len = len(df)
if not meta_data['monitoring'].get('endIndex', None):
meta_data['monitoring']['endIndex'] = int(meta_data['load_data']['Status']['Records'])
meta_data['monitoring']['endIndexTemp'] = meta_data['monitoring']['endIndex']
if meta_data['register'].get('registered', False):
meta_data['monitoring']['endIndex'] = meta_data['monitoring']['endIndexTemp']
            meta_data['register']['registered'] = False #ack registry
if (meta_data['monitoring']['endIndex'] + retrain_threshold) < df_len:
meta_data['monitoring']['endIndexTemp'] = df_len
retrain = True
else:
log.info('Pipeline running first time')
meta_data = {}
meta_data['monitoring'] = {}
retrain = True
if retrain:
meta_data['monitoring']['runId'] = last_run_id + 1
meta_data['monitoring']['retrain'] = retrain
utils.write_json(meta_data, targetPath/IOFiles['metaData'])
status = {'Status':'Success','retrain': retrain, 'runId':meta_data['monitoring']['runId']}
log.info(f'output: {status}')
return json.dumps(status)
"""
def getMainCodeModules(self):
modules = [{'module':'Path', 'mod_from':'pathlib'}
,{'module':'pandas','mod_as':'pd'}
,{'module':'json'}
]
return modules
def addMainCode(self, indent=1):
self.codeText += """
if __name__ == '__main__':
config = validateConfig()
targetPath = Path('aion') / config['targetPath']
targetPath.mkdir(parents=True, exist_ok=True)
log_file = targetPath / IOFiles['log']
log = utils.logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)
try:
print(monitoring(config, targetPath, log))
except Exception as e:
status = {'Status': 'Failure', 'Message': str(e)}
print(json.dumps(status))
"""
def addStatement(self, statement, indent=1):
self.codeText += f"\\n{self.tab * indent}{statement}"
def getCode(self, indent=1):
return self.function_code + '\\n' + self.codeText
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class selector():
def __init__(self, indent=0, tab_size=4):
self.tab = " "*tab_size
self.codeText = ""
self.pipe = 'pipe'
self.code_generated = False
self.input_files = {}
self.output_files = {}
self.function_code = ''
self.addInputFiles({'inputData' : 'transformedData.dat', 'metaData' : 'modelMetaData.json','log' : 'aion.log','outputData' : 'featureEngineeredData.dat'})
def addInputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.input_files[k] = v
def addOutputFiles(self, files):
if not isinstance(files, dict):
raise TypeError(f"Required dict type got {type(files)} type")
for k,v in files.items():
self.input_files[k] = v
def getInputFiles(self):
text = 'IOFiles = '
if not self.input_files:
text += '{ }'
else:
text += json.dumps(self.input_files, indent=4)
return text
def getOutputFiles(self):
text = 'output_file = '
if not self.output_files:
text += '{ }'
else:
text += json.dumps(self.output_files, indent=4)
return text
def getInputOutputFiles(self, indent=0):
text = '\\n'
text += self.getInputFiles()
if indent:
text = text.replace('\\n', self.tab * indent + '\\n')
return text
def __addValidateConfigCode(self):
text = "\\n\\
\\ndef validateConfig():\\
\\n config_file = Path(__file__).parent/'config.json'\\
\\n if not Path(config_file).exists():\\
\\n raise ValueError(f'Config file is missing: {config_file}')\\
\\n config = read_json(config_file)\\
\\n return config"
return text
def addMainCode(self):
self.codeText += """
if __name__ == '__main__':
config = validateConfig()
targetPath = Path('aion') / config['targetPath']
if not targetPath.exists():
raise ValueError(f'targetPath does not exist')
meta_data_file = targetPath / IOFiles['metaData']
if meta_data_file.exists():
meta_data = read_json(meta_data_file)
else:
raise ValueError(f'Configuration file not found: {meta_data_file}')
log_file = targetPath / IOFiles['log']
log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)
try:
print(featureSelector(config,targetPath, log))
except Exception as e:
status = {'Status': 'Failure', 'Message': str(e)}
print(json.dumps(status))
"""
def addValidateConfigCode(self, indent=1):
self.function_code += self.__addValidateConfigCode()
def addStatement(self, statement, indent=1):
self.codeText += '\\n' + self.tab * indent + statement
def getCode(self):
return self.function_code + '\\n' + self.codeText
def addLocalFunctionsCode(self):
self.addValidateConfigCode()
def getPrefixModules(self):
modules = [{'module':'Path', 'mod_from':'pathlib'}
,{'module':'pandas', 'mod_as':'pd'}
]
return modules
def addPrefixCode(self, indent=1):
self.codeText += """
def featureSelector(config, targetPath, log):
dataLoc = targetPath / IOFiles['inputData']
if not dataLoc.exists():
return {'Status': 'Failure', 'Message': 'Data location does not exist.'}
status = dict()
df = pd.read_csv(dataLoc)
log.log_dataframe(df)
csv_path = str(targetPath / IOFiles['outputData'])
write_data(df, csv_path, index=False)
status = {'Status': 'Success', 'dataFilePath': IOFiles['outputData']}
log.info(f'Selected data saved at {csv_path}')
meta_data['featureengineering'] = {}
meta_data['featureengineering']['Status'] = status
write_json(meta_data, str(targetPath / IOFiles['metaData']))
log.info(f'output: {status}')
return json.dumps(status)
"""
def getSuffixModules(self):
modules = []
return modules
def addSuffixCode(self, indent=1):
self.codeText += ""
def getMainCodeModules(self):
modules = [
{'module':'json'}
,{'module':'logging'}
]
return modules
def getPipe(self):
return self.pipe
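# A minimal, illustrative driver for the selector code generator above.
# It mirrors the call order used by run_selector elsewhere in this package;
# it is a sketch, not part of the generated pipeline itself.
if __name__ == '__main__':
    sel = selector()
    sel.addLocalFunctionsCode()
    sel.addPrefixCode()
    sel.addSuffixCode()
    sel.addMainCode()
    print(sel.getInputOutputFiles())
    print(sel.getCode())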
"""
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
import platform
from mlac.timeseries.core import *
from .utility import *
output_file_map = {
'text' : {'text' : 'text_profiler.pkl'},
'targetEncoder' : {'targetEncoder' : 'targetEncoder.pkl'},
'featureEncoder' : {'featureEncoder' : 'inputEncoder.pkl'},
'normalizer' : {'normalizer' : 'normalizer.pkl'}
}
def add_common_imports(importer):
common_imports = [
{'module': 'json', 'mod_from': None, 'mod_as': None},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'argparse', 'mod_from': None, 'mod_as': None},
{'module': 'platform', 'mod_from': None, 'mod_as': None }
]
for mod in common_imports:
importer.addModule(mod['module'], mod_from=mod['mod_from'], mod_as=mod['mod_as'])
def get_transformer_params(config):
param_keys = ["modelVersion","problem_type","target_feature","train_features","text_features","profiler","test_ratio","dateTimeFeature"] #BugID:13217
data = {key:value for (key,value) in config.items() if key in param_keys}
data['targetPath'] = config['modelName']
return data
def run_transformer(config):
transformer = profiler()
importer = importModule()
function = global_function()
importModules(importer, transformer.getPrefixModules())
importer.addModule('warnings')
transformer.addPrefixCode()
importModules(importer, transformer.getMainCodeModules())
transformer.addMainCode()
usecase = config['modelName']+'_'+config['modelVersion']
deploy_path = Path(config["deploy_path"])/'MLaC'/'DataTransformation'
deploy_path.mkdir(parents=True, exist_ok=True)
generated_files = []
# create the utility file
importer.addLocalModule('*', mod_from='utility')
utility_obj = utility_function('transformer')
with open(deploy_path/"utility.py", 'w') as f:
f.write(file_header(usecase) + utility_obj.get_code())
generated_files.append("utility.py")
# create empty init file to make a package
with open(deploy_path/"__init__.py", 'w') as f:
f.write(file_header(usecase))
generated_files.append("__init__.py")
code = file_header(usecase)
code += "\\nimport os\\nos.path.abspath(os.path.join(__file__, os.pardir))\\n" #chdir to import from current dir
code += importer.getCode()
code += '\\nwarnings.filterwarnings("ignore")\\n'
code += transformer.getInputOutputFiles()
code += function.getCode()
transformer.addLocalFunctionsCode()
code += transformer.getCode()
with open(deploy_path/"aionCode.py", "w") as f:
f.write(code)
generated_files.append("aionCode.py")
with open(deploy_path/"requirements.txt", "w") as f:
req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()])
f.write(req)
generated_files.append("requirements.txt")
config_file = deploy_path/"config.json"
config_data = get_transformer_params(config)
with open (config_file, "w") as f:
json.dump(config_data, f, indent=4)
generated_files.append("config.json")
create_docker_file('transformer', deploy_path,config['modelName'], generated_files)
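# Hedged usage sketch: run_transformer expects an AION config dict. The keys
# shown below are inferred from get_transformer_params and run_transformer and
# may be incomplete; the values are placeholders.
#   config = {'modelName': 'usecase', 'modelVersion': '1',
#             'deploy_path': '/tmp/aion', 'problem_type': 'timeseries',
#             'target_feature': 'sales', 'train_features': ['sales'],
#             'text_features': [], 'profiler': {}, 'test_ratio': 0.2,
#             'dateTimeFeature': 'date'}
#   run_transformer(config)   # writes MLaC/DataTransformation/*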
"""
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
from mlac.timeseries.core import *
from .utility import *
def get_register_params(config, models):
param_keys = ["modelVersion","problem_type"]
data = {key:value for (key,value) in config.items() if key in param_keys}
data['targetPath'] = config['modelName']
data['models'] = models
return data
def run_register(config):
importer = importModule()
registration = register(importer)
models = get_variable('models_name')
smaller_is_better = get_variable('smaller_is_better', False)
registration.addLocalFunctionsCode(models)
registration.addPrefixCode(smaller_is_better)
registration.addMainCode(models)
importModules(importer, registration.getMainCodeModules())
importer.addModule('warnings')
generated_files = []
usecase = config['modelName']+'_'+config['modelVersion']
deploy_path = Path(config["deploy_path"])/'MLaC'/'ModelRegistry'
deploy_path.mkdir(parents=True, exist_ok=True)
# create the utility file
importer.addLocalModule('utility', mod_as='utils')
utility_obj = utility_function('register')
with open(deploy_path/"utility.py", 'w') as f:
f.write(file_header(usecase) + utility_obj.get_code())
generated_files.append("utility.py")
# create empty init file required for creating a package
with open(deploy_path/"__init__.py", 'w') as f:
f.write(file_header(usecase))
generated_files.append("__init__.py")
code = importer.getCode()
code += '\\nwarnings.filterwarnings("ignore")\\n'
code += registration.getInputOutputFiles()
code += registration.getCode()
# create serving file
with open(deploy_path/"aionCode.py", 'w') as f:
f.write(file_header(usecase) + code)
generated_files.append("aionCode.py")
# create requirements file
req_file = deploy_path/"requirements.txt"
with open(req_file, "w") as f:
req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()])
f.write(req)
generated_files.append("requirements.txt")
# create config file
with open (deploy_path/"config.json", "w") as f:
json.dump(get_register_params(config, models), f, indent=4)
generated_files.append("config.json")
# create docker file
create_docker_file('register', deploy_path,config['modelName'], generated_files)
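# Note: run_register reads pipeline state via get_variable; a hedged sketch of
# standalone use (these values are normally populated by run_trainer):
#   update_variable('models_name', ['LSTM'])
#   update_variable('smaller_is_better', True)
#   run_register(config)   # writes MLaC/ModelRegistry/*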
"""
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import datetime
from pathlib import Path
variables = {}
def update_variable(name, value):
variables[name] = value
def get_variable(name, default=None):
return variables.get(name, default)
def append_variable(name, value):
data = get_variable(name)
if not data:
update_variable(name, [value])
elif not isinstance(data, list):
update_variable(name, [data, value])
else:
data.append(value)
update_variable(name, data)
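# Example: successive calls accumulate values into a list, e.g.
#   append_variable('models_name', 'MLP')
#   append_variable('models_name', 'LSTM')
#   get_variable('models_name')   # -> ['MLP', 'LSTM']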
def addDropFeature(feature, features_list, coder, indent=1):
coder.addStatement(f'if {feature} in {features_list}:', indent=indent)
coder.addStatement(f'{features_list}.remove({feature})', indent=indent+1)
def importModules(importer, modules_list):
for module in modules_list:
mod_from = module.get('mod_from',None)
mod_as = module.get('mod_as',None)
importer.addModule(module['module'], mod_from=mod_from, mod_as=mod_as)
def file_header(use_case, module_name=None):
time_str = datetime.datetime.now().isoformat(timespec='seconds', sep=' ')
text = "#!/usr/bin/env python\\n# -*- coding: utf-8 -*-\\n"
return text + f"'''\\nThis file is automatically generated by AION for {use_case} usecase.\\nFile generation time: {time_str}\\n'''"
def get_module_mapping(module):
mapping = {
"LogisticRegression": {'module':'LogisticRegression', 'mod_from':'sklearn.linear_model'}
,"GaussianNB": {'module':'GaussianNB', 'mod_from':'sklearn.naive_bayes'}
,"DecisionTreeClassifier": {'module':'DecisionTreeClassifier', 'mod_from':'sklearn.tree'}
,"SVC": {'module':'SVC', 'mod_from':'sklearn.svm'}
,"KNeighborsClassifier": {'module':'KNeighborsClassifier', 'mod_from':'sklearn.neighbors'}
,"GradientBoostingClassifier": {'module':'GradientBoostingClassifier', 'mod_from':'sklearn.ensemble'}
,'RandomForestClassifier':{'module':'RandomForestClassifier','mod_from':'sklearn.ensemble'}
,'XGBClassifier':{'module':'XGBClassifier','mod_from':'xgboost'}
,'LGBMClassifier':{'module':'LGBMClassifier','mod_from':'lightgbm'}
,'CatBoostClassifier':{'module':'CatBoostClassifier','mod_from':'catboost'}
,"LinearRegression": {'module':'LinearRegression', 'mod_from':'sklearn.linear_model'}
,"Lasso": {'module':'Lasso', 'mod_from':'sklearn.linear_model'}
,"Ridge": {'module':'Ridge', 'mod_from':'sklearn.linear_model'}
,"DecisionTreeRegressor": {'module':'DecisionTreeRegressor', 'mod_from':'sklearn.tree'}
,'RandomForestRegressor':{'module':'RandomForestRegressor','mod_from':'sklearn.ensemble'}
,'XGBRegressor':{'module':'XGBRegressor','mod_from':'xgboost'}
,'LGBMRegressor':{'module':'LGBMRegressor','mod_from':'lightgbm'}
,'CatBoostRegressor':{'module':'CatBoostRegressor','mod_from':'catboost'}
}
return mapping.get(module, None)
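# Illustrative sketch (not AION's generated-code mechanism): resolving a model
# name to its class via get_module_mapping and importlib. Requires sklearn to
# be installed for this particular example.
if __name__ == '__main__':
    import importlib
    spec = get_module_mapping('RandomForestClassifier')
    if spec:
        cls = getattr(importlib.import_module(spec['mod_from']), spec['module'])
        print(cls)   # the sklearn RandomForestClassifier class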
def create_docker_file(name, path,usecasename,files=[],text_feature=False):
text = ""
if name == 'load_data':
text='FROM python:3.8-slim-buster'
text+='\\n'
text+='LABEL "usecase"="'+str(usecasename)+'"'
text+='\\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
for file in files:
text+=f'\\nCOPY {file} {file}'
text+='\\n'
text+='RUN pip install --no-cache-dir -r requirements.txt'
elif name == 'transformer':
text='FROM python:3.8-slim-buster\\n'
text+='LABEL "usecase"="'+str(usecasename)+'"'
text+='\\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
text+='\\n'
for file in files:
text+=f'\\nCOPY {file} {file}'
if text_feature:
text+='\\nCOPY AIX-0.1-py3-none-any.whl AIX-0.1-py3-none-any.whl'
text+='\\n'
text+='''RUN \\
'''
if text_feature:
text += ''' apt-get update && apt-get install -y git && \\
 pip install requests && pip install git+https://github.com/MCFreddie777/language-check.git && \\
'''
text+=''' pip install --no-cache-dir -r requirements.txt'''
if text_feature:
text += ''' \\
 && python -m nltk.downloader stopwords && python -m nltk.downloader punkt && python -m nltk.downloader wordnet && python -m nltk.downloader averaged_perceptron_tagger'''
text+='\\n'
elif name == 'selector':
text='FROM python:3.8-slim-buster'
text+='\\n'
text+='LABEL "usecase"="'+str(usecasename)+'"'
text+='\\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
text+='\\n'
for file in files:
text+=f'\\nCOPY {file} {file}'
text+='\\n'
text+='RUN pip install --no-cache-dir -r requirements.txt'
elif name == 'train':
text='FROM python:3.8-slim-buster'
text+='\\n'
text+='LABEL "usecase"="'+str(usecasename)+'"'
text+='\\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
text+='\\n'
for file in files:
text+=f'\\nCOPY {file} {file}'
text+='\\n'
text+='RUN pip install --no-cache-dir -r requirements.txt'
elif name == 'register':
text='FROM python:3.8-slim-buster'
text+='\\n'
text+='LABEL "usecase"="'+str(usecasename)+'"'
text+='\\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
text+='\\n'
for file in files:
text+=f'\\nCOPY {file} {file}'
text+='\\n'
text+='RUN pip install --no-cache-dir -r requirements.txt'
elif name == 'Prediction':
text='FROM python:3.8-slim-buster'
text+='\\n'
text+='LABEL "usecase"="'+str(usecasename)+'"'
text+='\\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
text+='\\n'
for file in files:
text+=f'\\nCOPY {file} {file}'
text+='\\n'
if text_feature:
text+='COPY AIX-0.1-py3-none-any.whl AIX-0.1-py3-none-any.whl\\n'
text+='''RUN \\
'''
if text_feature:
text += ''' apt-get update && apt-get install -y git && \\
 pip install requests && pip install git+https://github.com/MCFreddie777/language-check.git && \\
'''
text+=''' pip install --no-cache-dir -r requirements.txt'''
if text_feature:
text += ''' \\
 && python -m nltk.downloader stopwords && python -m nltk.downloader punkt && python -m nltk.downloader wordnet && python -m nltk.downloader averaged_perceptron_tagger'''
text+='\\n'
text+='ENTRYPOINT ["python", "aionCode.py","-ip","0.0.0.0","-pn","8094"]\\n'
elif name == 'input_drift':
text='FROM python:3.8-slim-buster'
text+='\\n'
text+='LABEL "usecase"="'+str(usecasename)+'"'
text+='\\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
text+='\\n'
for file in files:
text+=f'\\nCOPY {file} {file}'
text+='\\n'
text+='RUN pip install --no-cache-dir -r requirements.txt'
file_name = Path(path)/'Dockerfile'
with open(file_name, 'w') as f:
f.write(text)
"""
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from .load_data import run_loader
from .transformer import run_transformer
from .selector import run_selector
from .trainer import run_trainer
from .register import run_register
from .deploy import run_deploy
from .drift_analysis import run_drift_analysis
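# The run_* entry points above are typically invoked in pipeline order; a
# hedged sketch (config construction is handled elsewhere in AION):
#   run_loader(config)          # MLaC/DataIngestion
#   run_transformer(config)     # MLaC/DataTransformation
#   run_selector(config)        # MLaC/FeatureEngineering
#   run_trainer(config)         # MLaC/ModelTraining_<algo>
#   run_register(config)        # MLaC/ModelRegistry
#   run_deploy(config)          # MLaC/ModelServing
#   run_drift_analysis(config)  # MLaC/ModelMonitoring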
"""
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
from mlac.timeseries.core import *
from .utility import *
def get_deploy_params(config):
param_keys = ["modelVersion","problem_type","target_feature","lag_order","noofforecasts"]
data = {key:value for (key,value) in config.items() if key in param_keys}
data['targetPath'] = config['modelName']
data['ipAddress'] = '127.0.0.1'
data['portNo'] = '8094'
return data
def import_trainer_module(importer):
non_sklearn_modules = get_variable('non_sklearn_modules')
if non_sklearn_modules:
for mod in non_sklearn_modules:
module = get_module_mapping(mod)
mod_from = module.get('mod_from',None)
mod_as = module.get('mod_as',None)
importer.addModule(module['module'], mod_from=mod_from, mod_as=mod_as)
imported_modules = [
]
def run_deploy(config):
generated_files = []
importer = importModule()
deployer = deploy()
importModules(importer, imported_modules)
usecase = config['modelName']+'_'+config['modelVersion']
deploy_path = Path(config["deploy_path"])/'MLaC'/'ModelServing'
deploy_path.mkdir(parents=True, exist_ok=True)
# create the utility file
importer.addLocalModule('utility', mod_as='utils')
utility_obj = utility_function('Prediction')
with open(deploy_path/"utility.py", 'w') as f:
f.write(file_header(usecase) + utility_obj.get_code())
generated_files.append("utility.py")
# create empty init file required for creating a package
with open(deploy_path/"__init__.py", 'w') as f:
f.write(file_header(usecase))
generated_files.append("__init__.py")
importModules(importer,deployer.getPredictionCodeModules())
code = file_header(usecase)
code += importer.getCode()
code += deployer.getInputOutputFiles()
deployer.addPredictionCode()
code += deployer.getCode()
# create prediction file
with open(deploy_path/"predict.py", 'w') as f:
f.write(code)
generated_files.append("predict.py")
# create service file
with open(deploy_path/"aionCode.py", 'w') as f:
f.write(file_header(usecase) + deployer.getServiceCode())
generated_files.append("aionCode.py")
importer.addModule('seaborn')
importer.addModule('sklearn')
# create requirements file
req_file = deploy_path/"requirements.txt"
with open(req_file, "w") as f:
req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()])
f.write(req)
generated_files.append("requirements.txt")
# create config file
config_file = deploy_path/"config.json"
config_data = get_deploy_params(config)
with open (config_file, "w") as f:
json.dump(config_data, f, indent=4)
generated_files.append("config.json")
# create docker file
create_docker_file('Prediction', deploy_path,config['modelName'], generated_files)
"""
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
from mlac.timeseries.core import *
from mlac.timeseries.app import utility as utils
def get_model_name(algo, method):
if method == 'modelBased':
return algo + '_' + 'MLBased'
if method == 'statisticalBased':
return algo + '_' + 'StatisticsBased'
else:
return algo
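# Examples: get_model_name('LSTM', 'modelBased') -> 'LSTM_MLBased';
# get_model_name('ARIMA', 'statisticalBased') -> 'ARIMA_StatisticsBased';
# any other method falls through to the bare algorithm name.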
def get_training_params(config, algo):
param_keys = ["modelVersion","problem_type","target_feature","train_features","scoring_criteria","test_ratio","optimization_param","dateTimeFeature"]#BugID:13217
data = {key:value for (key,value) in config.items() if key in param_keys}
data['algorithms'] = {algo: config['algorithms'][algo]}
data['targetPath'] = config['modelName']
return data
def update_score_comparer(scorer):
smaller_is_better_scorer = ['neg_mean_squared_error','mse','neg_root_mean_squared_error','rmse','neg_mean_absolute_error','mae']
if scorer.lower() in smaller_is_better_scorer:
utils.update_variable('smaller_is_better', True)
else:
utils.update_variable('smaller_is_better', False)
def run_trainer(config):
trainer = learner()
importer = importModule()
function = global_function()
utils.importModules(importer,trainer.getPrefixModules())
update_score_comparer(config['scoring_criteria'])
model_name = list(config['algorithms'].keys())[0]
if model_name == 'MLP':
utils.importModules(importer,trainer.getMlpCodeModules())
trainer.addMlpCode()
elif model_name == 'LSTM':
utils.importModules(importer,trainer.getLstmCodeModules())
trainer.addLstmCode()
trainer.addMainCode()
usecase = config['modelName']+'_'+config['modelVersion']
deploy_path = Path(config["deploy_path"])/'MLaC'/('ModelTraining'+'_' + model_name)
deploy_path.mkdir(parents=True, exist_ok=True)
generated_files = []
# create the utility file
importer.addLocalModule('utility', mod_as='utils')
utility_obj = utility_function('train')
with open(deploy_path/"utility.py", 'w') as f:
f.write(utils.file_header(usecase) + utility_obj.get_code())
generated_files.append("utility.py")
# create empty init file to make a package
with open(deploy_path/"__init__.py", 'w') as f:
f.write(utils.file_header(usecase))
generated_files.append("__init__.py")
importer.addModule("warnings")
code = importer.getCode()
code += 'warnings.filterwarnings("ignore")\\n'
code += f"\\nmodel_name = '{model_name}'\\n"
utils.append_variable('models_name',model_name)
out_files = {'log':f'{model_name}_aion.log','model':f'{model_name}_model.pkl','metrics':'metrics.json','metaDataOutput':f'{model_name}_modelMetaData.json','production':'production.json'}
trainer.addOutputFiles(out_files)
code += trainer.getInputOutputFiles()
code += function.getCode()
trainer.addLocalFunctionsCode()
code += trainer.getCode()
with open(deploy_path/"aionCode.py", "w") as f:
f.write(code)
generated_files.append("aionCode.py")
with open(deploy_path/"requirements.txt", "w") as f:
req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()])
f.write(req)
generated_files.append("requirements.txt")
with open (deploy_path/"config.json", "w") as f:
json.dump(get_training_params(config, model_name), f, indent=4)
generated_files.append("config.json")
utils.create_docker_file('train', deploy_path,config['modelName'], generated_files)
"""
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
import platform
from mlac.timeseries.core import *
from .utility import *
imported_modules = [
{'module': 'json', 'mod_from': None, 'mod_as': None},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'argparse', 'mod_from': None, 'mod_as': None},
{'module': 'platform', 'mod_from': None, 'mod_as': None }
]
def get_load_data_params(config):
param_keys = ["modelVersion","problem_type","target_feature","selected_features","dateTimeFeature","dataLocation"]
data = {key:value for (key,value) in config.items() if key in param_keys}
data['targetPath'] = config['modelName']
return data
def run_loader(config):
generated_files = []
importer = importModule()
loader = tabularDataReader()
importModules(importer, imported_modules)
usecase = config['modelName']+'_'+config['modelVersion']
deploy_path = Path(config["deploy_path"])/'MLaC'/'DataIngestion'
deploy_path.mkdir(parents=True, exist_ok=True)
# create the utility file
importer.addLocalModule('*', mod_from='utility')
utility_obj = utility_function('load_data')
with open(deploy_path/"utility.py", 'w') as f:
f.write(file_header(usecase) + utility_obj.get_code())
generated_files.append("utility.py")
# create the production data reader file
importer.addLocalModule('dataReader', mod_from='data_reader')
readers = ['sqlite','influx']
if 's3' in config.keys():
readers.append('s3')
reader_obj = data_reader(readers)
with open(deploy_path/"data_reader.py", 'w') as f:
f.write(file_header(usecase) + reader_obj.get_code())
generated_files.append("data_reader.py")
# create empty init file to make a package
with open(deploy_path/"__init__.py", 'w') as f:
f.write(file_header(usecase))
generated_files.append("__init__.py")
code = file_header(usecase)
code += importer.getCode()
code += loader.getInputOutputFiles()
loader.addLocalFunctionsCode()
loader.addLoadDataCode()
loader.addMainCode()
code += loader.getCode()
with open(deploy_path/"aionCode.py", "w") as f:
f.write(code)
generated_files.append("aionCode.py")
with open(deploy_path/"requirements.txt", "w") as f:
req=importer.getBaseModule(extra_importers=[utility_obj.get_importer(), reader_obj.get_importer()])
f.write(req)
generated_files.append("requirements.txt")
config_file = deploy_path/"config.json"
config_data = get_load_data_params(config)
with open (config_file, "w") as f:
json.dump(config_data, f, indent=4)
generated_files.append("config.json")
create_docker_file('load_data', deploy_path,config['modelName'],generated_files)
"""
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
from mlac.timeseries.core import *
from .utility import *
def get_drift_params(config):
param_keys = ["modelVersion","problem_type","retrainThreshold","dataLocation"]
data = {key:value for (key,value) in config.items() if key in param_keys}
data['targetPath'] = config['modelName']
return data
def run_drift_analysis(config):
importer = importModule()
monitor = drift()
monitor.addLocalFunctionsCode()
monitor.addPrefixCode()
monitor.addMainCode()
importModules(importer, monitor.getMainCodeModules())
importer.addModule('warnings')
generated_files = []
usecase = config['modelName']+'_'+config['modelVersion']
deploy_path = Path(config["deploy_path"])/'MLaC'/'ModelMonitoring'
deploy_path.mkdir(parents=True, exist_ok=True)
# create the utility file
importer.addLocalModule('utility', mod_as='utils')
utility_obj = utility_function('load_data')
with open(deploy_path/"utility.py", 'w') as f:
f.write(file_header(usecase) + utility_obj.get_code())
generated_files.append("utility.py")
# create empty init file required for creating a package
with open(deploy_path/"__init__.py", 'w') as f:
f.write(file_header(usecase))
generated_files.append("__init__.py")
code = importer.getCode()
code += '\\nwarnings.filterwarnings("ignore")\\n'
code += monitor.getInputOutputFiles()
code += monitor.getCode()
# create serving file
with open(deploy_path/"aionCode.py", 'w') as f:
f.write(file_header(usecase) + code)
generated_files.append("aionCode.py")
# create requirements file
req_file = deploy_path/"requirements.txt"
with open(req_file, "w") as f:
req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()])
f.write(req)
generated_files.append("requirements.txt")
# create config file
with open (deploy_path/"config.json", "w") as f:
json.dump(get_drift_params(config), f, indent=4)
generated_files.append("config.json")
# create docker file
create_docker_file('input_drift', deploy_path,config['modelName'], generated_files)
"""
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
import platform
from mlac.timeseries.core import *
from .utility import *
output_file_map = {
'feature_reducer' : {'feature_reducer' : 'feature_reducer.pkl'}
}
def get_selector_params(config):
param_keys = ["modelVersion","problem_type","target_feature","train_features","cat_features","n_components"]
data = {key:value for (key,value) in config.items() if key in param_keys}
data['targetPath'] = config['modelName']
return data
def run_selector(config):
select = selector()
importer = importModule()
function = global_function()
importModules(importer,select.getPrefixModules())
importModules(importer, select.getSuffixModules())
importModules(importer, select.getMainCodeModules())
select.addPrefixCode()
select.addSuffixCode()
select.addMainCode()
generated_files = []
usecase = config['modelName']+'_'+config['modelVersion']
deploy_path = Path(config["deploy_path"])/'MLaC'/'FeatureEngineering'
deploy_path.mkdir(parents=True, exist_ok=True)
# create the utility file
importer.addLocalModule('*', mod_from='utility')
utility_obj = utility_function('selector')
with open(deploy_path/"utility.py", 'w') as f:
f.write(file_header(usecase) + utility_obj.get_code())
generated_files.append("utility.py")
# create empty init file to make a package
with open(deploy_path/"__init__.py", 'w') as f:
f.write(file_header(usecase))
generated_files.append("__init__.py")
code = file_header(usecase)
code += importer.getCode()
code += select.getInputOutputFiles()
code += function.getCode()
select.addLocalFunctionsCode()
code += select.getCode()
with open(deploy_path/"aionCode.py", "w") as f:
f.write(code)
generated_files.append("aionCode.py")
with open(deploy_path/"requirements.txt", "w") as f:
req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()])
f.write(req)
generated_files.append("requirements.txt")
config_file = deploy_path/"config.json"
config_data = get_selector_params(config)
with open (config_file, "w") as f:
json.dump(config_data, f, indent=4)
generated_files.append("config.json")
create_docker_file('selector', deploy_path,config['modelName'], generated_files)
"""
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from .imports import importModule
from .load_data import tabularDataReader
from .transformer import transformer as profiler
from .selector import selector
from .trainer import learner
from .deploy import deploy
from .functions import global_function
import os
import sys
import re
import json
import subprocess
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import mpld3
from appbe.eda import ux_eda
from sklearn.preprocessing import LabelEncoder
from aif360.datasets import StandardDataset
from aif360.metrics import ClassificationMetric
from aif360.datasets import BinaryLabelDataset
def get_metrics(request):
dataFile = os.path.join(request.session['deploypath'], "data", "preprocesseddata.csv.gz")
predictionScriptPath = os.path.join(request.session['deploypath'], 'aion_predict.py')
displaypath = os.path.join(request.session['deploypath'], "etc", "display.json")
f = open(displaypath, "r")
configSettings = f.read()
f.close()
configSettings = json.loads(configSettings)
Target_feature = configSettings['targetFeature']
outputStr = subprocess.check_output([sys.executable, predictionScriptPath, dataFile])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'predictions:(.*)', str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
predict_dict = json.loads(outputStr)
df = pd.read_csv(dataFile)
df_p = pd.DataFrame.from_dict(predict_dict['data'])
d3_url = request.GET.get('d3_url')
mpld3_url = request.GET.get('mpld3_url')
df_temp = request.GET.get('feature')
global metricvalue
metricvalue = request.GET.get('metricvalue')
Protected_feature = df_temp
df_p = df_p.drop(columns=[Target_feature, 'remarks', 'probability'])
df_p.rename(columns={'prediction': Target_feature}, inplace=True)
eda_obj = ux_eda(dataFile, optimize=1)
features,dateFeature,seqFeature,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catfeatures = eda_obj.getFeatures()
features_to_Encode = features
categorical_names = {}
encoders = {}
for feature in features_to_Encode:
le = LabelEncoder()
le.fit(df[feature])
df[feature] = le.transform(df[feature])
le.fit(df_p[feature])
df_p[feature] = le.transform(df_p[feature])
categorical_names[feature] = le.classes_
encoders[feature] = le
new_list = [item for item in categorical_names[Protected_feature] if not pd.isnull(item)]
class_size = len(new_list)
if class_size > 10:
return 'HeavyFeature'
metrics = fair_metrics(categorical_names, Protected_feature, Target_feature, class_size, df, df_p)
figure = plot_fair_metrics(metrics)
html_graph = mpld3.fig_to_html(figure,d3_url=d3_url,mpld3_url=mpld3_url)
return html_graph
def fair_metrics(categorical_names, Protected_feature, Target_feature, class_size, df, df_p):
cols = [metricvalue]
obj_fairness = [[0]]
fair_metrics = pd.DataFrame(data=obj_fairness, index=['objective'], columns=cols)
for indx in range(class_size):
priv_group = categorical_names[Protected_feature][indx]
privileged_class = np.where(categorical_names[Protected_feature] == priv_group)[0]
data_orig = StandardDataset(df,
label_name=Target_feature,
favorable_classes=[1],
protected_attribute_names=[Protected_feature],
privileged_classes=[privileged_class])
attr = data_orig.protected_attribute_names[0]
idx = data_orig.protected_attribute_names.index(attr)
privileged_groups = [{attr:data_orig.privileged_protected_attributes[idx][0]}]
unprivileged_size = data_orig.unprivileged_protected_attributes[0].size
unprivileged_groups = []
for idx2 in range(unprivileged_size):
unprivileged_groups.extend([{attr:data_orig.unprivileged_protected_attributes[idx][idx2]}])
bld = BinaryLabelDataset(df=df, label_names=[Target_feature], protected_attribute_names=[Protected_feature])
bld_p = BinaryLabelDataset(df=df_p, label_names=[Target_feature], protected_attribute_names=[Protected_feature])
ClsMet = ClassificationMetric(bld, bld_p,unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)
if metricvalue == "Theil Index":
row = pd.DataFrame([[ClsMet.theil_index()]],
columns = cols ,
index = [priv_group])
elif metricvalue == "Equal Opportunity Difference":
row = pd.DataFrame([[ClsMet.equal_opportunity_difference()]],
columns = cols ,
index = [priv_group])
elif metricvalue == "Disparate Impact":
row = pd.DataFrame([[ClsMet.disparate_impact()]],
columns = cols ,
index = [priv_group])
elif metricvalue == "Statistical Parity Difference":
row = pd.DataFrame([[ClsMet.statistical_parity_difference()]],
columns = cols ,
index = [priv_group])
#fair_metrics = fair_metrics.append(row)
fair_metrics = pd.concat([fair_metrics,row])
return fair_metrics
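# A minimal standalone sketch of the aif360 pattern used above; the column
# names and data are illustrative only, and the same dataset is passed twice
# just to show the call shape:
#   df = pd.DataFrame({'gender': [0, 1, 0, 1], 'label': [1, 0, 1, 1]})
#   bld = BinaryLabelDataset(df=df, label_names=['label'],
#                            protected_attribute_names=['gender'])
#   metric = ClassificationMetric(bld, bld,
#                                 unprivileged_groups=[{'gender': 0}],
#                                 privileged_groups=[{'gender': 1}])
#   print(metric.statistical_parity_difference())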
def plot_fair_metrics(fair_metrics):
import matplotlib.patches as patches
plt.style.use('default')
import seaborn as sns
fig, ax = plt.subplots(figsize=(10,4), ncols=1, nrows=1)
plt.subplots_adjust(
left = 0.125,
bottom = 0.1,
right = 0.9,
top = 0.9,
wspace = .5,
hspace = 1.1
)
y_title_margin = 1.2
plt.suptitle("Fairness metrics", y = 1.09, fontsize=20)
sns.set(style="dark")
cols = fair_metrics.columns.values
obj = fair_metrics.loc['objective']
if metricvalue == "Theil Index":
size_rect = [0.5]
rect = [-0.1]
bottom = [-0.1]
top = [2]
bound = [[-0.1,0.1]]
elif metricvalue == "Equal Opportunity Difference":
size_rect = [0.2]
rect = [-0.1]
bottom = [-1]
top = [1]
bound = [[-0.1,0.1]]
elif metricvalue == "Disparate Impact":
size_rect = [0.4]
rect = [0.8]
bottom = [0]
top = [2]
bound = [[-0.1,0.1]]
elif metricvalue == "Statistical Parity Difference":
size_rect = [0.2]
rect = [-0.1]
bottom = [-1]
top = [1]
bound = [[-0.1,0.1]]
for attr in fair_metrics.index[1:len(fair_metrics)].values:
check = [bound[i][0] < fair_metrics.loc[attr][i] < bound[i][1] for i in range(0,1)]
for i in range(0,1):
plt.subplot(1, 1, i+1)
xx = fair_metrics.index[1:len(fair_metrics)].values.tolist()
yy = fair_metrics.iloc[1:len(fair_metrics)][cols[i]].values.tolist()
palette = sns.color_palette('husl', len(xx))
ax = sns.pointplot(x=fair_metrics.index[1:len(fair_metrics)], y=yy, palette=palette, hue=xx)
index = 0
for p in zip(ax.get_xticks(), yy):
if (p[1] > 2.0):
_color = palette.as_hex()[index]
_val = 'Outlier(' + str(round(p[1],3)) + ')'
ax.text(p[0]-0.5, 0.02, _val, color=_color)
else:
ax.text(p[0], p[1]+0.05, round(p[1],3), color='k')
index = index + 1
plt.ylim(bottom[i], top[i])
plt.setp(ax.patches, linewidth=0)
ax.get_xaxis().set_visible(False)
ax.legend(loc='right', bbox_to_anchor=(1, 0.8), ncol=1)
ax.add_patch(patches.Rectangle((-5,rect[i]), 10, size_rect[i], alpha=0.3, facecolor="green", linewidth=1, linestyle='solid'))
# plt.axhline(obj[i], color='black', alpha=0.3)
plt.title(cols[i], fontname="Times New Roman", size=20,fontweight="bold")
ax.set_ylabel('')
ax.set_xlabel('')
return fig
import json
import os
def get_brier_score(request):
try:
displaypath = os.path.join(request.session['deploypath'], "etc", "output.json")
with open(displaypath) as file:
config = json.load(file)
problem_type = config["data"]["ModelType"]
brier_score = config["data"]["matrix"]["BRIER_SCORE"]
print(problem_type,brier_score)
except Exception as e:
#print(str(e))
raise ValueError(str(e))
return problem_type, brier_score
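# Assumed shape of the etc/output.json consumed above (illustrative only):
#   {"data": {"ModelType": "classification",
#             "matrix": {"BRIER_SCORE": 0.08}}}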
import numpy as np
import joblib
import pandas as pd
from appbe.eda import ux_eda
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
# from pathlib import Path
import configparser
import json
import matplotlib.pyplot as plt
import os
def trustedai_uq(request):
try:
displaypath = os.path.join(request.session['deploypath'], "etc", "display.json")
f = open(displaypath, "r")
configSettings = f.read()
f.close()
configSettings = json.loads(configSettings)
TargetFeature = configSettings['targetFeature']
problemType = configSettings['problemType']
raw_data_loc = configSettings['preprocessedData']
dataLocation = configSettings['postprocessedData']
selectedfeatures = request.GET.get('values')
if problemType.lower() in ("classification", "regression"):
model = (os.path.join(request.session['deploypath'], 'model', configSettings['saved_model']))
df = pd.read_csv(dataLocation)
trainfea = df.columns.tolist()
feature = json.loads(selectedfeatures)
targ = TargetFeature
tar = [targ]
from bin.aion_uncertainties import aion_uq
outputStr = aion_uq(model, dataLocation, feature, tar)
return outputStr
except Exception as e:
print('error',e)
return e
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
from pathlib import Path
def get_metrics(request):
output = {}
output_path = Path(request.session['deploypath'])/"etc"/"output.json"
if not output_path.exists():
raise ValueError('output json path does not exist, something unexpected happened')
with open(output_path) as file:
config = json.load(file)
output['problem_type'] = config.get('data',{}).get('ModelType')
output['best_model'] = config.get('data',{}).get('BestModel')
output['hyper_params'] = config.get('data',{}).get('params')
output['best_score'] = str(round(float(config.get('data',{}).get('BestScore')), 2))
output['scoring_method'] = config.get('data',{}).get('ScoreType')
if output['problem_type'] == 'classification':
output['mcc_score'] = str(round(float(config.get('data',{}).get('matrix',{}).get('MCC_SCORE', 0.0)), 2))
else:
output['mcc_score'] = 'NA'
return output
import base64
import io
import json
import os
import urllib
import joblib
import numpy as np
import pandas as pd
from SALib.analyze import sobol
class sensitivityAnalysis():
def __init__(self, model, problemType, data, target, featureName):
self.model = model
self.problemType = problemType
self.data = data
self.target = target
self.featureName = featureName
self.param_values = []
self.X = []
self.Y = []
self.problem = {}
def preprocess(self):
self.X = self.data[self.featureName].values
self.Y = self.data[self.target].values
bounds = [[np.min(self.X[:, i]), np.max(self.X[:, i])] for i in range(self.X.shape[1])]
self.problem = {
'num_vars': self.X.shape[1],
'names': self.featureName,
'bounds': bounds
}
def generate_samples(self,size):
from SALib.sample import sobol
self.param_values = sobol.sample(self.problem, size)
def calSiClass(self, satype,isML,isDL):
try:
D = self.problem['num_vars']
S = np.zeros(self.X.shape[1])
for class_label in np.unique(self.Y):
if isML:
y_pred_poba = self.model.predict_proba(self.param_values)[:, class_label]
if isDL:
y_pred_poba = self.model.predict(self.param_values)[:,class_label]
if not y_pred_poba.size % (2 * D + 2) == 0:
lim = y_pred_poba.size - y_pred_poba.size % (2 * D + 2)
y_pred_poba = y_pred_poba[:lim]
Si = sobol.analyze(self.problem, y_pred_poba)
if satype.lower() == 'first':
S += Si['S1']
else:
S += Si['ST']
S /= len(np.unique(self.Y))
return S
except Exception as e:
print('Error in calculating Si for Classification: ', str(e))
raise ValueError(str(e))
def calSiReg(self, satype,isML,isDL):
try:
D = self.problem['num_vars']
Y = np.array([self.model.predict(X_sample.reshape(1, -1)) for X_sample in self.param_values])
Y = Y.reshape(-1)
if not Y.size % (2 * D + 2) == 0:
lim = Y.size - Y.size % (2 * D + 2)
Y = Y[:lim]
Si = sobol.analyze(self.problem, Y)
if satype.lower() == 'first':
S = Si['S1']
else:
S = Si['ST']
return S
except Exception as e:
print('Error in calculating Si for Regression: ', str(e))
raise ValueError(str(e))
def plotSi(self, S, saType):
try:
import matplotlib.pyplot as plt
if saType.lower() == 'first':
title, label = 'Sensitivity Analysis', 'First order'
else:
title, label = 'Sensitivity Analysis', 'Total order'
x = np.arange(len(self.problem['names']))
width = 0.35
fig, ax = plt.subplots()
ax.bar(x - width / 2, S, width, label=label)
ax.set_xticks(x)
ax.set_xlabel('Features')
ax.set_ylabel('Sensitivity Indices')
ax.set_title(title)
ax.set_xticklabels(self.problem['names'], rotation=45, ha="right")
ax.legend()
plt.tight_layout()
image = io.BytesIO()
plt.savefig(image, format='png')
image.seek(0)
string = base64.b64encode(image.read())
SAimage = 'data:image/png;base64,' + urllib.parse.quote(string)
except Exception as e:
print(e)
SAimage = ''
return SAimage
def checkModelType(modelName):
isML= False
isDL = False
if modelName in ["Neural Network", "Convolutional Neural Network (1D)", "Recurrent Neural Network","Recurrent Neural Network (GRU)",
"Recurrent Neural Network (LSTM)", "Neural Architecture Search", "Deep Q Network", "Dueling Deep Q Network"]:
isDL = True
elif modelName in ["Linear Regression","Lasso","Ridge","Logistic Regression", "Naive Bayes", "Decision Tree", "Random Forest", "Support Vector Machine", "K Nearest Neighbors", "Gradient Boosting",
"Extreme Gradient Boosting (XGBoost)", "Light Gradient Boosting (LightGBM)", "Categorical Boosting (CatBoost)","Bagging (Ensemble)"]:
isML = True
return isML,isDL
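# Hedged usage sketch for the sensitivityAnalysis class above; the model,
# dataframe, and feature names below are placeholders:
#   obj = sensitivityAnalysis(model, 'classification', df, 'target', ['f1', 'f2'])
#   obj.preprocess()
#   obj.generate_samples(512)          # Sobol sample size (a power of 2)
#   S = obj.calSiClass('first', isML=True, isDL=False)
#   html_img = obj.plotSi(S, 'first')  # base64-encoded PNG data URI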
def startSA(request):
try:
displaypath = os.path.join(request.session['deploypath'], "etc", "display.json")
if not os.path.exists(displaypath):
raise Exception('Config file not found.')
with open(displaypath) as file:
config = json.load(file)
problemType = config['problemType']
if problemType.lower() not in ['classification','regression']:
raise Exception(f"Problem Type: {problemType} not supported")
isML,isDL = checkModelType(config['modelname'])
sample_size = 1024
if isML:
model = joblib.load(os.path.join(request.session['deploypath'], 'model', config['saved_model']))
sample_size = 2048
if isDL:
from tensorflow.keras.models import load_model
model = load_model(os.path.join(request.session['deploypath'], 'model', config['saved_model']))
sample_size = 512
target = config['targetFeature']
featureName = config['modelFeatures']
dataPath = os.path.join(request.session['deploypath'], 'data', 'postprocesseddata.csv.gz')
if not os.path.exists(dataPath):
raise Exception('Data file not found.')
from utils.file_ops import read_df_compressed
read_status,dataFrame = read_df_compressed(dataPath)
obj = sensitivityAnalysis(model, problemType, dataFrame, target, featureName)
obj.preprocess()
obj.generate_samples(sample_size)
submitType = str(request.GET.get('satype'))
saType = 'first' if submitType == 'first' else 'total'
if problemType.lower() == 'classification':
SA_values = obj.calSiClass(saType,isML,isDL)
else:
SA_values = obj.calSiReg(saType,isML,isDL)
if SA_values.size and saType:
graph = obj.plotSi(SA_values, saType)
if graph:
outputJson = {'Status': "Success", "graph": graph}
else:
outputJson = {'Status': "Error", "graph": '','reason':'Error in Plotting Graph'}
else:
outputJson = {'Status': "Error", "graph": '','reason':'Error in calculating Si values'}
output_json = json.dumps(outputJson)
return output_json
except Exception as e:
print(str(e))
raise ValueError(str(e))
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import warnings
import numpy as np
import pandas as pd
import sklearn.metrics as metrics
from collections import defaultdict
from sklearn.metrics import confusion_matrix
import re
import shutil
import scipy.stats as st
import json
import os,sys
import glob
import logging
from utils.file_ops import read_df_compressed
class Visualization():
def __init__(self,usecasename,version,dataframe,visualizationJson,dateTimeColumn,deployPath,dataFolderLocation,numericContinuousFeatures,discreteFeatures,categoricalFeatures,modelFeatures,targetFeature,modeltype,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,labelMaps,vectorizerFeatures,textFeatures,numericalFeatures,nonNumericFeatures,emptyFeatures,nrows,ncols,saved_model,scoreParam,learner_type,modelname,featureReduction,reduction_data_file):
self.dataframe = dataframe
self.displayjson = {}
self.visualizationJson = visualizationJson
self.dateTimeColumn = dateTimeColumn
self.deployPath = deployPath
#shutil.copy2(os.path.join(os.path.dirname(os.path.abspath(__file__)),'aion_portal.py'),self.deployPath)
if learner_type == 'ML' and modelname != 'Neural Architecture Search':
if(os.path.isfile(os.path.join(self.deployPath,'explainable_ai.py'))):
os.remove(os.path.join(self.deployPath,'explainable_ai.py'))
shutil.copy2(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','utilities','xai','explainable_ai.py'),self.deployPath)
# os.rename(os.path.join(self.deployPath,'explainable_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
try:
os.rename(os.path.join(self.deployPath,'explainable_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
except FileExistsError:
os.remove(os.path.join(self.deployPath,'aion_xai.py'))
os.rename(os.path.join(self.deployPath,'explainable_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
elif learner_type == 'DL' or modelname == 'Neural Architecture Search':
if(os.path.isfile(os.path.join(self.deployPath,'explainable_ai.py'))):
os.remove(os.path.join(self.deployPath,'explainable_ai.py'))
shutil.copy2(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','utilities','xai','explainabledl_ai.py'),self.deployPath)
# os.rename(os.path.join(self.deployPath,'explainabledl_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
try:
os.rename(os.path.join(self.deployPath,'explainabledl_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
except FileExistsError:
os.remove(os.path.join(self.deployPath,'aion_xai.py'))
os.rename(os.path.join(self.deployPath,'explainabledl_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
self.jsondeployPath = deployPath
#self.deployPath = self.deployPath+'visualization/'
self.dataFolderLocation = dataFolderLocation
self.vectorizerFeatures = vectorizerFeatures
self.textFeatures = textFeatures
self.emptyFeatures = emptyFeatures
'''
try:
os.makedirs(self.deployPath)
except OSError as e:
print("\\nFolder Already Exists")
'''
self.numericContinuousFeatures = numericContinuousFeatures
self.discreteFeatures = discreteFeatures
self.categoricalFeatures = categoricalFeatures
self.modelFeatures = modelFeatures
self.modeltype = modeltype
self.targetFeature = targetFeature
self.displayjson['usecasename'] = str(usecasename)
self.displayjson['version'] = str(version)
self.displayjson['problemType'] = str(self.modeltype)
self.displayjson['targetFeature'] = self.targetFeature
self.displayjson['numericalFeatures'] = numericalFeatures
self.displayjson['nonNumericFeatures'] = nonNumericFeatures
self.displayjson['modelFeatures'] = self.modelFeatures
self.displayjson['textFeatures'] = self.textFeatures
self.displayjson['emptyFeatures'] = self.emptyFeatures
self.displayjson['modelname']= str(modelname)
self.displayjson['preprocessedData'] = str(original_data_file)
self.displayjson['nrows'] = str(nrows)
self.displayjson['ncols'] = str(ncols)
self.displayjson['saved_model'] = str(saved_model)
self.displayjson['scoreParam'] = str(scoreParam)
self.displayjson['labelMaps'] = eval(str(labelMaps))
self.original_data_file = original_data_file
self.displayjson['featureReduction'] = featureReduction
if featureReduction == 'True':
self.displayjson['reduction_data_file'] = reduction_data_file
else:
self.displayjson['reduction_data_file'] = ''
self.pred_filename = predicted_data_file
self.profiled_data_file = profiled_data_file
self.displayjson['predictedData'] = predicted_data_file
self.displayjson['postprocessedData'] = profiled_data_file
#self.trained_data_file = trained_data_file
#self.displayjson['trainingData'] = trained_data_file
#self.displayjson['categorialFeatures']=categoricalFeatures
#self.displayjson['discreteFeatures']=discreteFeatures
#self.displayjson['continuousFeatures']=numericContinuousFeatures
#y = json.dumps(self.displayjson)
#print(y)
self.labelMaps = labelMaps
self.log = logging.getLogger('eion')
def visualizationrecommandsystem(self):
try:
import tensorflow.keras.utils as kutils
datasetid = self.visualizationJson['datasetid']
self.log.info('\\n================== Data Profiling Details==================')
datacolumns=list(self.dataframe.columns)
self.log.info('================== Data Profiling Details End ==================\\n')
self.log.info('================== Features Correlation Details ==================\\n')
self.log.info('\\n================== Model Performance Analysis ==================')
if os.path.exists(self.pred_filename):
try:
status,df=read_df_compressed(self.pred_filename)
if self.modeltype == 'Classification' or self.modeltype == 'ImageClassification' or self.modeltype == 'anomaly_detection':
y_actual = df['actual'].values
y_predict = df['predict'].values
y_actual = kutils.to_categorical(y_actual)
y_predict = kutils.to_categorical(y_predict)
classes = df.actual.unique()
n_classes = y_actual.shape[1]
self.log.info('-------> ROC AUC CURVE')
roc_curve_dict = []
for i in classes:
try:
classname = i
if str(self.labelMaps) != '{}':
inv_map = {v: k for k, v in self.labelMaps.items()}
classname = inv_map[i]
fpr, tpr, threshold = metrics.roc_curve(y_actual[:,i],y_predict[:,i])
roc_auc = metrics.auc(fpr, tpr)
class_roc_auc_curve = {}
class_roc_auc_curve['class'] = str(classname)
fprstring = ','.join(str(v) for v in fpr)
tprstring = ','.join(str(v) for v in tpr)
class_roc_auc_curve['FP'] = str(fprstring)
class_roc_auc_curve['TP'] = str(tprstring)
roc_curve_dict.append(class_roc_auc_curve)
self.log.info('----------> Class: '+str(classname))
self.log.info('------------> ROC_AUC: '+str(roc_auc))
self.log.info('------------> False Positive Rate (x Points): '+str(fpr))
self.log.info('------------> True Positive Rate (y Points): '+str(tpr))
except:
pass
self.displayjson['ROC_AUC_CURVE'] = roc_curve_dict
self.log.info('-------> Precision Recall CURVE')
precision_recall_curve_dict = []
for i in range(n_classes):
try:
lr_precision, lr_recall, threshold = metrics.precision_recall_curve(y_actual[:,i],y_predict[:,i])
classname = i
if str(self.labelMaps) != '{}':
inv_map = {v: k for k, v in self.labelMaps.items()}
classname = inv_map[i]
roc_auc = metrics.auc(lr_recall,lr_precision)
class_precision_recall_curve = {}
class_precision_recall_curve['class'] = str(classname)
Precisionstring = ','.join(str(round(v,2)) for v in lr_precision)
Recallstring = ','.join(str(round(v,2)) for v in lr_recall)
class_precision_recall_curve['Precision'] = str(Precisionstring)
class_precision_recall_curve['Recall'] = str(Recallstring)
precision_recall_curve_dict.append(class_precision_recall_curve)
except:
pass
self.log.info('----------> Class: '+str(classname))
self.log.info('------------> ROC_AUC: '+str(roc_auc))
self.log.info('------------> Recall (x Points): '+str(lr_recall))
self.log.info('------------> Precision (y Points): '+str(lr_precision))
self.displayjson['PRECISION_RECALL_CURVE'] = precision_recall_curve_dict
status,predictdataFrame=read_df_compressed(self.displayjson['predictedData'])
except Exception as e:
self.log.info('================== Error in Calculation ROC_AUC/Recall Precision Curve '+str(e))
self.log.info('================== Model Performance Analysis End ==================\\n')
self.log.info('\\n================== For Descriptive Analysis of Model Features ==================')
outputfile = os.path.join(self.jsondeployPath,'etc','display.json')
with open(outputfile, 'w') as fp:
json.dump(self.displayjson, fp)
self.log.info('================== For Descriptive Analysis of Model Features End ==================\\n')
except Exception as inst:
self.log.info('Visualization Failed !....'+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def drawlinechart(self,xcolumn,ycolumn,deploy_path,datasetid):
title = 'aion_visualization_'+xcolumn+"_"+ycolumn+"_linechart"
yaxisname = 'Average '+ycolumn
datasetindex = datasetid
visulizationjson = '[{"_id": "543234","_type": "visualization","_source": {"title": "'+title+'",'
visulizationjson = visulizationjson+'"visState": "{\\\\"title\\\\":\\\\"'+title+'\\\\",'
visulizationjson = visulizationjson+'\\\\"type\\\\":\\\\"line\\\\",\\\\"params\\\\":{\\\\"type\\\\":\\\\"line\\\\",\\\\"grid\\\\":{\\\\"categoryLines\\\\":false,\\\\"style\\\\":{\\\\"color\\\\":\\\\"#eee\\\\"}},\\\\"categoryAxes\\\\":[{\\\\"id\\\\":\\\\"CategoryAxis-1\\\\",\\\\"type\\\\":\\\\"category\\\\",\\\\"position\\\\":\\\\"bottom\\\\",\\\\"show\\\\":true,\\\\"style\\\\":{},\\\\"scale\\\\":{\\\\"type\\\\":\\\\"linear\\\\"},\\\\"labels\\\\":{\\\\"show\\\\":true,\\\\"truncate\\\\":100},\\\\"title\\\\":{}}],\\\\"valueAxes\\\\":[{\\\\"id\\\\":\\\\"ValueAxis-1\\\\",\\\\"name\\\\":\\\\"LeftAxis-1\\\\",\\\\"type\\\\":\\\\"value\\\\",\\\\"position\\\\":\\\\"left\\\\",\\\\"show\\\\":true,\\\\"style\\\\":{},\\\\"scale\\\\":{\\\\"type\\\\":\\\\"linear\\\\",\\\\"mode\\\\":\\\\"normal\\\\"},\\\\"labels\\\\":{\\\\"show\\\\":true,\\\\"rotate\\\\":0,\\\\"filter\\\\":false,\\\\"truncate\\\\":100},\\\\"title\\\\":'
visulizationjson = visulizationjson+'{\\\\"text\\\\":\\\\"'+yaxisname+'\\\\"}}],\\\\"seriesParams\\\\":[{\\\\"show\\\\":\\\\"true\\\\",\\\\"type\\\\":\\\\"line\\\\",\\\\"mode\\\\":\\\\"normal\\\\",\\\\"data\\\\":'
visulizationjson = visulizationjson+'{\\\\"label\\\\":\\\\"'+yaxisname+'\\\\",\\\\"id\\\\":\\\\"1\\\\"},\\\\"valueAxis\\\\":\\\\"ValueAxis-1\\\\",\\\\"drawLinesBetweenPoints\\\\":true,\\\\"showCircles\\\\":true}],\\\\"addTooltip\\\\":true,\\\\"addLegend\\\\":true,\\\\"legendPosition\\\\":\\\\"right\\\\",\\\\"times\\\\":[],\\\\"addTimeMarker\\\\":false},\\\\"aggs\\\\":[{\\\\"id\\\\":\\\\"1\\\\",\\\\"enabled\\\\":true,\\\\"type\\\\":\\\\"avg\\\\",\\\\"schema\\\\":\\\\"metric\\\\",\\\\"params\\\\":{\\\\"field\\\\":\\\\"'+str(ycolumn)+'\\\\"}},{\\\\"id\\\\":\\\\"2\\\\",\\\\"enabled\\\\":true,\\\\"type\\\\":\\\\"terms\\\\",\\\\"schema\\\\":\\\\"segment\\\\",\\\\"params\\\\":{\\\\"field\\\\":\\\\"'+xcolumn+'\\\\",\\\\"size\\\\":100,\\\\"order\\\\":\\\\"desc\\\\",\\\\"orderBy\\\\":\\\\"1\\\\",\\\\"otherBucket\\\\":false,\\\\"otherBucketLabel\\\\":\\\\"Other\\\\",\\\\"missingBucket\\\\":false,\\\\"missingBucketLabel\\\\":\\\\"Missing\\\\"}}]}","uiStateJSON": "{}", "description": "","version": 1,"kibanaSavedObjectMeta": {"searchSourceJSON": "{\\\\"index\\\\":\\\\"'+datasetindex+'\\\\",\\\\"query\\\\":{\\\\"query\\\\":\\\\"\\\\",\\\\"language\\\\":\\\\"lucene\\\\"},\\\\"filter\\\\":[]}"}},"_migrationVersion": {"visualization": "6.7.2"}}]'
filename = os.path.join(deploy_path, title+'.json')
f = open(filename, "w")
f.write(str(visulizationjson))
f.close()
def drawbarchart(self,xcolumn,ycolumn,deploy_path,datasetid):
title = 'aion_visualization_'+xcolumn+"_"+ycolumn+"_barchart"
yaxisname = 'Average '+ycolumn
datasetindex = datasetid
visulizationjson = '[{"_id": "123456","_type": "visualization","_source": {"title":"'+title+'",'
visulizationjson = visulizationjson+'"visState": "{\\\\"title\\\\":\\\\"'+title+'\\\\",'
visulizationjson = visulizationjson+'\\\\"type\\\\":\\\\"histogram\\\\",\\\\"params\\\\":{\\\\"addLegend\\\\":true,\\\\"addTimeMarker\\\\":false,\\\\"addTooltip\\\\":true,\\\\"categoryAxes\\\\":[{\\\\"id\\\\":\\\\"CategoryAxis-1\\\\",\\\\"labels\\\\":{\\\\"show\\\\":true,\\\\"truncate\\\\":100},\\\\"position\\\\":\\\\"bottom\\\\",\\\\"scale\\\\":{\\\\"type\\\\":\\\\"linear\\\\"},\\\\"show\\\\":true,\\\\"style\\\\":{},\\\\"title\\\\":{},\\\\"type\\\\":\\\\"category\\\\"}],\\\\"grid\\\\":{\\\\"categoryLines\\\\":false,\\\\"style\\\\":{\\\\"color\\\\":\\\\"#eee\\\\"}},\\\\"legendPosition\\\\":\\\\"right\\\\",\\\\"seriesParams\\\\":[{\\\\"data\\\\":{\\\\"id\\\\":\\\\"1\\\\",'
visulizationjson = visulizationjson+'\\\\"label\\\\":\\\\"'+yaxisname+'\\\\"},'
visulizationjson = visulizationjson+'\\\\"drawLinesBetweenPoints\\\\":true,\\\\"mode\\\\":\\\\"stacked\\\\",\\\\"show\\\\":\\\\"true\\\\",\\\\"showCircles\\\\":true,\\\\"type\\\\":\\\\"histogram\\\\",\\\\"valueAxis\\\\":\\\\"ValueAxis-1\\\\"}],\\\\"times\\\\":[],\\\\"type\\\\":\\\\"histogram\\\\",\\\\"valueAxes\\\\":[{\\\\"id\\\\":\\\\"ValueAxis-1\\\\",\\\\"labels\\\\":{\\\\"filter\\\\":false,\\\\"rotate\\\\":0,\\\\"show\\\\":true,\\\\"truncate\\\\":100},\\\\"name\\\\":\\\\"LeftAxis-1\\\\",\\\\"position\\\\":\\\\"left\\\\",\\\\"scale\\\\":{\\\\"mode\\\\":\\\\"normal\\\\",\\\\"type\\\\":\\\\"linear\\\\"},\\\\"show\\\\":true,\\\\"style\\\\":{},\\\\"title\\\\":'
visulizationjson = visulizationjson+'{\\\\"text\\\\":\\\\"'+yaxisname+'\\\\"},'
visulizationjson = visulizationjson+'\\\\"type\\\\":\\\\"value\\\\"}]},\\\\"aggs\\\\":[{\\\\"id\\\\":\\\\"1\\\\",\\\\"enabled\\\\":true,\\\\"type\\\\":\\\\"avg\\\\",\\\\"schema\\\\":\\\\"metric\\\\",\\\\"params\\\\":{\\\\"field\\\\":\\\\"'+str(xcolumn)+'\\\\"}},{\\\\"id\\\\":\\\\"2\\\\",\\\\"enabled\\\\":true,\\\\"type\\\\":\\\\"terms\\\\",\\\\"schema\\\\":\\\\"segment\\\\",\\\\"params\\\\":{\\\\"field\\\\":\\\\"'+ycolumn+'\\\\",\\\\"size\\\\":100,\\\\"order\\\\":\\\\"asc\\\\",\\\\"orderBy\\\\":\\\\"1\\\\",\\\\"otherBucket\\\\":false,\\\\"otherBucketLabel\\\\":\\\\"Other\\\\",\\\\"missingBucket\\\\":false,\\\\"missingBucketLabel\\\\":\\\\"Missing\\\\"}}]}","uiStateJSON":"{}","description": "","version": 1,"kibanaSavedObjectMeta": {'
visulizationjson = visulizationjson+'"searchSourceJSON": "{\\\\"index\\\\":\\\\"'+datasetindex+'\\\\",\\\\"query\\\\":{\\\\"language\\\\":\\\\"lucene\\\\",\\\\"query\\\\":\\\\"\\\\"},\\\\"filter\\\\":[]}"}},"_migrationVersion":{"visualization": "6.7.2"}}]'
filename = os.path.join(deploy_path, title+'.json')
f = open(filename, "w")
f.write(str(visulizationjson))
f.close()
def drawpiechart(self,xcolumn,deploy_path,datasetid):
title = 'aion_visualization_'+xcolumn+"_piechart"
datasetindex = datasetid
visulizationjson = '[{"_id": "123456","_type": "visualization","_source": {"title":"'+title+'",'
visulizationjson = visulizationjson+'"visState": "{\\\\"title\\\\":\\\\"'+title+'\\\\",'
visulizationjson = visulizationjson+'\\\\"type\\\\":\\\\"pie\\\\",\\\\"params\\\\":{\\\\"type\\\\":\\\\"pie\\\\",\\\\"addTooltip\\\\":true,\\\\"addLegend\\\\":true,\\\\"legendPosition\\\\":\\\\"right\\\\",\\\\"isDonut\\\\":true,\\\\"labels\\\\":{\\\\"show\\\\":false,\\\\"values\\\\":true,\\\\"last_level\\\\":true,\\\\"truncate\\\\":100}},\\\\"aggs\\\\":[{\\\\"id\\\\":\\\\"1\\\\",\\\\"enabled\\\\":true,\\\\"type\\\\":\\\\"count\\\\",\\\\"schema\\\\":\\\\"metric\\\\",\\\\"params\\\\":{}},{\\\\"id\\\\":\\\\"2\\\\",\\\\"enabled\\\\":true,\\\\"type\\\\":\\\\"terms\\\\",\\\\"schema\\\\":\\\\"segment\\\\",\\\\"params\\\\":{\\\\"field\\\\":\\\\"'+xcolumn+'\\\\",\\\\"size\\\\":100,\\\\"order\\\\":\\\\"asc\\\\",\\\\"orderBy\\\\":\\\\"1\\\\",\\\\"otherBucket\\\\":false,\\\\"otherBucketLabel\\\\":\\\\"Other\\\\",\\\\"missingBucket\\\\":false,\\\\"missingBucketLabel\\\\":\\\\"Missing\\\\"}}]}",'
visulizationjson = visulizationjson+'"uiStateJSON": "{}","description": "","version": 1,"kibanaSavedObjectMeta": {"searchSourceJSON":"{\\\\"index\\\\":\\\\"'+datasetid+'\\\\",\\\\"query\\\\":{\\\\"query\\\\":\\\\"\\\\",\\\\"language\\\\":\\\\"lucene\\\\"},\\\\"filter\\\\":[]}"}},"_migrationVersion": {"visualization": "6.7.2"}}]'
filename = os.path.join(deploy_path, title+'.json')
f = open(filename, "w")
f.write(str(visulizationjson))
f.close()
def get_confusion_matrix(self,df):
setOfyTrue = set(df['actual'])
unqClassLst = list(setOfyTrue)
if(str(self.labelMaps) != '{}'):
inv_mapping_dict = {v: k for k, v in self.labelMaps.items()}
unqClassLst2 = (pd.Series(unqClassLst)).map(inv_mapping_dict)
unqClassLst2 = list(unqClassLst2)
else:
unqClassLst2 = unqClassLst
indexName = []
columnName = []
for item in unqClassLst2:
indexName.append("act:"+str(item))
columnName.append("pre:"+str(item))
result = pd.DataFrame(confusion_matrix(df['actual'], df['predict'], labels = unqClassLst),index = indexName, columns = columnName)
resultjson = result.to_json(orient='index')
return(resultjson)
def DistributionFinder(self,data):
try:
distributionName =""
sse =0.0
KStestStatic=0.0
dataType=""
if(data.dtype == "float64"):
dataType ="Continuous"
elif(data.dtype =="int" or data.dtype =="int64"):
dataType="Discrete"
if(dataType == "Discrete"):
distributions= [st.bernoulli,st.binom,st.geom,st.nbinom,st.poisson]
index, counts = np.unique(abs(data.astype(int)),return_counts=True)
if(len(index)>=2):
best_sse = np.inf
y1=[]
total=sum(counts)
mean=float(sum(index*counts))/total
variance=float((sum(index**2*counts) -total*mean**2))/(total-1)
dispersion=mean/float(variance)
theta=1/float(dispersion)
r=mean*(float(theta)/(1-theta))
for j in counts:
y1.append(float(j)/total)
pmf1=st.bernoulli.pmf(index,mean)
pmf2=st.binom.pmf(index,len(index),p=mean/len(index))
pmf3=st.geom.pmf(index,1/float(1+mean))
pmf4=st.nbinom.pmf(index,mean,r)
pmf5=st.poisson.pmf(index,mean)
sse1 = np.sum(np.power(y1 - pmf1, 2.0))
sse2 = np.sum(np.power(y1 - pmf2, 2.0))
sse3 = np.sum(np.power(y1 - pmf3, 2.0))
sse4 = np.sum(np.power(y1 - pmf4, 2.0))
sse5 = np.sum(np.power(y1- pmf5, 2.0))
sselist=[sse1,sse2,sse3,sse4,sse5]
for i in range(0,len(sselist)):
if best_sse > sselist[i] > 0:
best_distribution = distributions[i].name
best_sse = sselist[i]
elif (len(index) == 1):
best_distribution = "Constant Data-No Distribution"
best_sse = 0.0
distributionName =best_distribution
sse=best_sse
elif(dataType == "Continuous"):
distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta]
best_distribution = st.norm.name
best_sse = np.inf
datamin=data.min()
datamax=data.max()
nrange=datamax-datamin
y, x = np.histogram(data.astype(float), bins='auto', density=True)
x = (x + np.roll(x, -1))[:-1] / 2.0
for distribution in distributions:
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
params = distribution.fit(data.astype(float))
# Separate parts of parameters
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Calculate fitted PDF and error with fit in distribution
pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
sse = np.sum(np.power(y - pdf, 2.0))
if(best_sse >sse > 0):
best_distribution = distribution.name
best_sse = sse
distributionName =best_distribution
sse=best_sse
except:
response = str(sys.exc_info()[0])
message='Job has Failed: '+response
print(message)
return distributionName,sse
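# --- Hedged illustration (not part of the original module; added for clarity) ---
# Standalone sketch of the SSE-based selection idea used by DistributionFinder
# above, restricted to a single Poisson candidate. `best_discrete_fit` is an
# illustrative name, not part of the original code.
import numpy as np
import scipy.stats as st

def best_discrete_fit(data):
    index, counts = np.unique(data.astype(int), return_counts=True)
    emp_pmf = counts / counts.sum()              # empirical pmf of the data
    mean = float((index * counts).sum()) / counts.sum()
    cand_pmf = st.poisson.pmf(index, mean)       # candidate pmf on the same support
    return 'poisson', float(np.sum((emp_pmf - cand_pmf) ** 2))

if __name__ == '__main__':
    # Data drawn from a Poisson should yield a small SSE.
    print(best_discrete_fit(np.random.poisson(3.0, size=500)))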
<s><s> import autograd
import autograd.numpy as np
import scipy.optimize
from autograd import grad
from autograd.scipy.special import logsumexp
from sklearn.cluster import KMeans
class HMM:
"""
A Hidden Markov Model with Gaussian observations with
unknown means and known precisions.
"""
def __init__(self, X, config_dict=None):
self.N, self.T, self.D = X.shape
self.K = config_dict['K'] # number of HMM states
self.I = np.eye(self.K)
self.Precision = np.zeros([self.D, self.D, self.K])
self.X = X
if config_dict['precision'] is None:
for k in np.arange(self.K):
self.Precision[:, :, k] = np.eye(self.D)
else:
self.Precision = config_dict['precision']
self.dParams_dWeights = None
self.alphaT = None # Store the final beliefs.
self.beta1 = None # store the first timestep beliefs from the beta recursion.
self.forward_trellis = {} # stores \\alpha
self.backward_trellis = {} # stores \\beta
def initialize_params(self, seed=1234):
np.random.seed(seed)
param_dict = {}
A = np.random.randn(self.K, self.K)
# use k-means to initialize the mean parameters
X = self.X.reshape([-1, self.D])
kmeans = KMeans(n_clusters=self.K, random_state=seed,
n_init=15).fit(X)
labels = kmeans.labels_
_, counts = np.unique(labels, return_counts=True)
pi = counts
phi = kmeans.cluster_centers_
param_dict['A'] = np.exp(A)
param_dict['pi0'] = pi
param_dict['phi'] = phi
return self.pack_params(param_dict)
def unpack_params(self, params):
param_dict = dict()
K = self.K
# For unpacking simplex parameters: have packed them as
# log(pi[:-1]) - log(pi[-1]).
unnorm_A = np.exp(np.append(params[:K**2-K].reshape(K, K-1),
np.zeros((K, 1)),
axis=1)
)
Z = np.sum(unnorm_A[:, :-1], axis=1)
unnorm_A /= Z[:, np.newaxis]
norm_A = unnorm_A / unnorm_A.sum(axis=1, keepdims=True)
param_dict['A'] = norm_A
unnorm_pi = np.exp(np.append(params[K**2-K:K**2-1], 0.0))
Z = np.sum(unnorm_pi[:-1])
unnorm_pi /= Z
param_dict['pi0'] = unnorm_pi / unnorm_pi.sum()
param_dict['phi'] = params[K**2-K+K-1:].reshape(self.D, K)
return param_dict
def weighted_alpha_recursion(self, xseq, pi, phi, Sigma, A, wseq, store_belief=False):
"""
Computes the weighted marginal probability of the sequence xseq given parameters;
weights wseq turn on or off the emissions p(x_t | z_t) (weighting scheme B)
:param xseq: T * D
:param pi: K * 1
:param phi: D * K
:param wseq: T * 1
:param A:
:return:
"""
ll = self.log_obs_lik(xseq[:, :, np.newaxis], phi[np.newaxis, :, :], Sigma)
alpha = np.log(pi.ravel()) + wseq[0] * ll[0]
if wseq[0] == 0:
self.forward_trellis[0] = alpha[:, np.newaxis]
for t in np.arange(1, self.T):
alpha = logsumexp(alpha[:, np.newaxis] + np.log(A), axis=0) + wseq[t] * ll[t]
if wseq[t] == 0:
# store the trellis, would be used to compute the posterior z_t | x_1...x_t-1, x_t+1, ...x_T
self.forward_trellis[t] = alpha[:, np.newaxis]
if store_belief:
# store the final belief
self.alphaT = alpha
return logsumexp(alpha)
def weighted_beta_recursion(self, xseq, pi, phi, Sigma, A, wseq, store_belief=False):
"""
Runs beta recursion;
weights wseq turn on or off the emissions p(x_t | z_t) (weighting scheme B)
:param xseq: T * D
:param pi: K * 1
:param phi: D * K
:param wseq: T * 1
:param A:
:return:
"""
ll = self.log_obs_lik(xseq[:, :, np.newaxis], phi[np.newaxis, :, :], Sigma)
beta = np.zeros_like(pi.ravel()) # log(\\beta) of all ones.
max_t = ll.shape[0]
if wseq[max_t - 1] == 0:
# store the trellis, would be used to compute the posterior z_t | x_1...x_t-1, x_t+1, ...x_T
self.backward_trellis[max_t - 1] = beta[:, np.newaxis]
for i in np.arange(1, max_t):
t = max_t - i - 1
beta = logsumexp((beta + wseq[t + 1] * ll[t + 1])[np.newaxis, :] + np.log(A), axis=1)
if wseq[t] == 0:
# store the trellis, would be used to compute the posterior z_t | x_1...x_t-1, x_t+1, ...x_T
self.backward_trellis[t] = beta[:, np.newaxis]
# account for the init prob
beta = (beta + wseq[0] * ll[0]) + np.log(pi.ravel())
if store_belief:
# store the final belief
self.beta1 = beta
return logsumexp(beta)
def weighted_loss(self, params, weights):
"""
For LOOCV / IF computation within a single sequence. Uses weighted alpha recursion
:param params:
:param weights:
:return:
"""
param_dict = self.unpack_params(params)
logp = self.get_prior_contrib(param_dict)
logp = logp + self.weighted_alpha_recursion(self.X[0], param_dict['pi0'],
param_dict['phi'],
self.Precision,
param_dict['A'],
weights)
return -logp
def loss_at_missing_timesteps(self, weights, params):
"""
:param weights: zeroed out weights indicate missing values
:param params: packed parameters
:return:
"""
# empty forward and backward trellis
self.clear_trellis()
param_dict = self.unpack_params(params)
# populate forward and backward trellis
lpx = self.weighted_alpha_recursion(self.X[0], param_dict['pi0'],
param_dict['phi'],
self.Precision,
param_dict['A'],
weights,
store_belief=True )
lpx_alt = self.weighted_beta_recursion(self.X[0], param_dict['pi0'],
param_dict['phi'],
self.Precision,
param_dict['A'],
weights,
store_belief=True)
assert np.allclose(lpx, lpx_alt) # sanity check
test_ll = []
# compute loo likelihood
ll = self.log_obs_lik(self.X[0][:, :, np.newaxis], param_dict['phi'], self.Precision)
# compute posterior p(z_t | x_1,...t-1, t+1,...T) \\forall missing t
tsteps = []
for t in self.forward_trellis.keys():
lpz_given_x = self.forward_trellis[t] + self.backward_trellis[t] - lpx
test_ll.append(logsumexp(ll[t] + lpz_given_x.ravel()))
tsteps.append(t)
# empty forward and backward trellis
self.clear_trellis()
return -np.array(test_ll)
def fit(self, weights, init_params=None, num_random_restarts=1, verbose=False, maxiter=None):
if maxiter:
options_dict = {'disp': verbose, 'gtol': 1e-10, 'maxiter': maxiter}
else:
options_dict = {'disp': verbose, 'gtol': 1e-10}
# Define a function that returns gradients of training loss using Autograd.
training_loss_fun = lambda params: self.weighted_loss(params, weights)
training_gradient_fun = grad(training_loss_fun, 0)
if init_params is None:
init_params = self.initialize_params()
if verbose:
print("Initial loss: ", training_loss_fun(init_params))
res = scipy.optimize.minimize(fun=training_loss_fun,
jac=training_gradient_fun,
x0=init_params,
tol=1e-10,
options=options_dict)
if verbose:
print('grad norm =', np.linalg.norm(res.jac))
return res.x
def clear_trellis(self):
self.forward_trellis = {}
self.backward_trellis = {}
#### Required for IJ computation ###
def compute_hessian(self, params_one, weights_one):
return autograd.hessian(self.weighted_loss, argnum=0)(params_one, weights_one)
def compute_jacobian(self, params_one, weights_one):
return autograd.jacobian(autograd.jacobian(self.weighted_loss, argnum=0), argnum=1)\\
(params_one, weights_one).squeeze()
###################################################
@staticmethod
def log_obs_lik(x, phi, Sigma):
"""
:param x: T*D*1
:param phi: 1*D*K
:param Sigma: D*D*K --- precision matrices per state
:return: ll
"""
centered_x = x - phi
ll = -0.5 * np.einsum('tdk, tdk, ddk -> tk', centered_x, centered_x, Sigma )
return ll
@staticmethod
def pack_params(params_dict):
param_list = [(np.log(params_dict['A'][:, :-1]) -
np.log(params_dict['A'][:, -1])[:, np.newaxis]).ravel(),
np.log(params_dict['pi0'][:-1]) - np.log(params_dict['pi0'][-1]),
params_dict['phi'].ravel()]
return np.concatenate(param_list)
@staticmethod
def get_prior_contrib(param_dict):
logp = 0.0
# Prior
logp += -0.5 * (np.linalg.norm(param_dict['phi'], axis=0) ** 2).sum()
logp += (1.1 - 1) * np.log(param_dict['A']).sum()
logp += (1.1 - 1) * np.log(param_dict['pi0']).sum()
return logp
@staticmethod
def get_indices_in_held_out_fold(T, pct_to_drop, contiguous=False):
"""
:param T: length of the sequence
:param pct_to_drop: % of T in the held out fold
:param contiguous: if True generate a block of indices to drop else generate indices by iid sampling
:return: o (the set of indices in the fold)
"""
if contiguous:
l = np.floor(pct_to_drop / 100. * T)
anchor = np.random.choice(np.arange(l + 1, T))
o = np.arange(anchor - l, anchor).astype(int)
else:
# i.i.d LWCV
o = np.random.choice(T - 2, size=int(pct_to_drop / 100. * T), replace=False) + 1
return o
@staticmethod
def synthetic_hmm_data(K, T, D, sigma0=None, seed=1234, variances_of_mean=1.0,
diagonal_upweight=False):
"""
:param K: Number of HMM states
:param T: length of the sequence
"""
N = 1 # For structured IJ we will remove data / time steps from a single sequence
np.random.seed(seed)
if sigma0 is None:
sigma0 = np.eye(D)
A = np.random.dirichlet(alpha=np.ones(K), size=K)
if diagonal_upweight:
A = A + 3 * np.eye(K) # add 3 to the diagonal and renormalize to encourage self transitions
A = A / A.sum(axis=1, keepdims=True)
pi0 = np.random.dirichlet(alpha=np.ones(K))
mus = np.random.normal(size=(K, D), scale=np.sqrt(variances_of_mean))
zs = np.empty((N, T), dtype=int)
X = np.empty((N, T, D))
for n in range(N):
zs[n, 0] = int(np.random.choice(np.arange(K), p=pi0))
X[n, 0] = np.random.multivariate_normal(mean=mus[zs[n, 0]], cov=sigma0)
for t in range(1, T):
zs[n, t] = int(np.random.choice(np.arange(K), p=A[zs[n, t - 1], :]))
X[n, t] = np.random.multivariate_normal(mean=mus[zs[n, t]], cov=sigma0)
return {'X': X, 'state_assignments': zs, 'A': A, 'initial_state_assignment': pi0, 'means': mus}
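# --- Hedged usage sketch (not part of the original module; sizes are illustrative) ---
# Fits the HMM above on its own synthetic data; config keys follow __init__.
if __name__ == '__main__':
    data = HMM.synthetic_hmm_data(K=2, T=50, D=2, seed=0)
    model = HMM(data['X'], config_dict={'K': 2, 'precision': None})
    weights = np.ones(model.T)                 # all emissions switched on
    opt_params = model.fit(weights, verbose=False, maxiter=25)
    print(model.unpack_params(opt_params)['A'])   # learned transition matrix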
<s><s> from builtins import range
import autograd.numpy as np
def adam(grad, x, callback=None, num_iters=100, step_size=0.001, b1=0.9, b2=0.999, eps=10**-8, polyak=False):
"""Adapted from autograd.misc.optimizers"""
m = np.zeros(len(x))
v = np.zeros(len(x))
for i in range(num_iters):
g = grad(x, i)
if callback: callback(x, i, g, polyak)
m = (1 - b1) * g + b1 * m # First moment estimate.
v = (1 - b2) * (g**2) + b2 * v # Second moment estimate.
mhat = m / (1 - b1**(i + 1)) # Bias correction.
vhat = v / (1 - b2**(i + 1))
x = x - step_size*mhat/(np.sqrt(vhat) + eps)
return x<s> import matplotlib.pyplot as plt
import numpy as np
import numpy.random as npr
import torch as torch
def make_data_gap(seed, data_count=100):
import GPy
npr.seed(0)
x = np.hstack([np.linspace(-5, -2, int(data_count/2)), np.linspace(2, 5, int(data_count/2))])
x = x[:, np.newaxis]
k = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.)
K = k.K(x)
L = np.linalg.cholesky(K + 1e-5 * np.eye(data_count))
# draw a noise free random function from a GP
eps = np.random.randn(data_count)
f = L @ eps
# use a homoskedastic Gaussian noise model N(f(x)_i, \\sigma^2). \\sigma^2 = 0.1
eps_noise = np.sqrt(0.1) * np.random.randn(data_count)
y = f + eps_noise
y = y[:, np.newaxis]
plt.plot(x, f, 'ko', ms=2)
plt.plot(x, y, 'ro')
plt.title("GP generated Data")
plt.pause(1)
return torch.FloatTensor(x), torch.FloatTensor(y), torch.FloatTensor(x), torch.FloatTensor(y)
def make_data_sine(seed, data_count=450):
# fix the random seed
np.random.seed(seed)
noise_var = 0.1
X = np.linspace(-4, 4, data_count)
y = 1*np.sin(X) + np.sqrt(noise_var)*npr.randn(data_count)
train_count = int (0.2 * data_count)
idx = npr.permutation(range(data_count))
X_train = X[idx[:train_count], np.newaxis ]
X_test = X[ idx[train_count:], np.newaxis ]
y_train = y[ idx[:train_count] ]
y_test = y[ idx[train_count:] ]
mu = np.mean(X_train, 0)
std = np.std(X_train, 0)
X_train = (X_train - mu) / std
X_test = (X_test - mu) / std
mu = np.mean(y_train, 0)
std = np.std(y_train, 0)
# mu = 0
# std = 1
y_train = (y_train - mu) / std
y_test = (y_test -mu) / std
train_stats = dict()
train_stats['mu'] = torch.FloatTensor([mu])
train_stats['sigma'] = torch.FloatTensor([std])
return torch.FloatTensor(X_train), torch.FloatTensor(y_train), torch.FloatTensor(X_test), torch.FloatTensor(y_test),\\
train_stats<s> import autograd
import autograd.numpy as np
import numpy.random as npr
import scipy.optimize
sigmoid = lambda x: 0.5 * (np.tanh(x / 2.) + 1)
get_num_train = lambda inputs: inputs.shape[0]
logistic_predictions = lambda params, inputs: sigmoid(np.dot(inputs, params))
class LogisticRegression:
def __init__(self):
self.params = None
def set_parameters(self, params):
self.params = params
def predict(self, X):
if self.params is not None:
# Outputs probability of a label being true according to logistic model
return np.atleast_2d(sigmoid(np.dot(X, self.params))).T
else:
raise RuntimeError("Params need to be fit before predictions can be made.")
def loss(self, params, weights, inputs, targets):
# Training loss is the negative log-likelihood of the training labels.
preds = logistic_predictions(params, inputs)
label_probabilities = preds * targets + (1 - preds) * (1 - targets)
return -np.sum(weights * np.log(label_probabilities + 1e-16))
def fit(self, weights, init_params, inputs, targets, verbose=True):
training_loss_fun = lambda params: self.loss(params, weights, inputs, targets)
# Define a function that returns gradients of training loss using Autograd.
training_gradient_fun = autograd.grad(training_loss_fun, 0)
# optimize params
if verbose:
print("Initial loss:", self.loss(init_params, weights, inputs, targets))
# opt_params = sgd(training_gradient_fun, params, hyper=1, num_iters=5000, step_size=0.1)
res = scipy.optimize.minimize(fun=training_loss_fun,
jac=training_gradient_fun,
x0=init_params,
tol=1e-10,
options={'disp': verbose})
opt_params = res.x
if verbose:
print("Trained loss:", self.loss(opt_params, weights, inputs, targets))
self.params = opt_params
return opt_params
def get_test_acc(self, params, test_targets, test_inputs):
preds = np.round(self.predict(test_inputs).T).astype(int)
err = np.abs(test_targets - preds).sum()
return 1 - err/ test_targets.shape[1]
#### Required for IJ computation ###
def compute_hessian(self, params_one, weights_one, inputs, targets):
return autograd.hessian(self.loss, argnum=0)(params_one, weights_one, inputs, targets)
def compute_jacobian(self, params_one, weights_one, inputs, targets):
return autograd.jacobian(autograd.jacobian(self.loss, argnum=0), argnum=1)\\
(params_one, weights_one, inputs, targets).squeeze()
###################################################
@staticmethod
def synthetic_lr_data(N=10000, D=10):
x = 1. * npr.randn(N, D)
x_test = 1. * npr.randn(int(0.3 * N), D)
w = npr.randn(D, 1)
y = sigmoid((x @ w)).ravel()
y = npr.binomial(n=1, p=y) # corrupt labels
y_test = sigmoid(x_test @ w).ravel()
# y_test = np.round(y_test)
y_test = npr.binomial(n=1, p=y_test)
return x, np.atleast_2d(y), x_test, np.atleast_2d(y_test)
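# --- Hedged usage sketch (not part of the original module; sizes are illustrative) ---
# Fits the autograd logistic regression above on its own synthetic data.
if __name__ == '__main__':
    x, y, x_test, y_test = LogisticRegression.synthetic_lr_data(N=500, D=5)
    lr = LogisticRegression()
    weights = np.ones(x.shape[0])              # uniform per-example weights
    params = lr.fit(weights, np.zeros(x.shape[1]), x, y.ravel(), verbose=False)
    print('test accuracy:', lr.get_test_acc(params, y_test, x_test))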
<s> import abc
import sys
# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta(str('ABC'), (), {})
from copy import deepcopy
import numpy as np
import numpy.random as npr
def make_batches(n_data, batch_size):
return [slice(i, min(i+batch_size, n_data)) for i in range(0, n_data, batch_size)]
def generate_regression_data(seed, data_count=500):
"""
Generate data from a noisy sine wave.
:param seed: random number seed
:param data_count: number of data points.
:return:
"""
np.random.seed(seed)
noise_var = 0.1
x = np.linspace(-4, 4, data_count)
y = 1*np.sin(x) + np.sqrt(noise_var)*npr.randn(data_count)
train_count = int (0.2 * data_count)
idx = npr.permutation(range(data_count))
x_train = x[idx[:train_count], np.newaxis ]
x_test = x[ idx[train_count:], np.newaxis ]
y_train = y[ idx[:train_count] ]
y_test = y[ idx[train_count:] ]
mu = np.mean(x_train, 0)
std = np.std(x_train, 0)
x_train = (x_train - mu) / std
x_test = (x_test - mu) / std
mu = np.mean(y_train, 0)
std = np.std(y_train, 0)
y_train = (y_train - mu) / std
train_stats = dict()
train_stats['mu'] = mu
train_stats['sigma'] = std
return x_train, y_train, x_test, y_test, train_stats
def form_D_for_auucc(yhat, zhatl, zhatu):
# a handy routine to format data as needed by the UCC fit() method
D = np.zeros([yhat.shape[0], 3])
D[:, 0] = yhat.squeeze()
D[:, 1] = zhatl.squeeze()
D[:, 2] = zhatu.squeeze()
return D
def fitted_ucc_w_nullref(y_true, y_pred_mean, y_pred_lower, y_pred_upper):
"""
Instantiates an UCC object for the target predictor plus a 'null' (constant band) reference
:param y_pred_lower:
:param y_pred_mean:
:param y_pred_upper:
:param y_true:
:return: ucc object fitted for two systems: target + null reference
"""
# form matrix for ucc:
X_for_ucc = form_D_for_auucc(y_pred_mean.squeeze(),
y_pred_mean.squeeze() - y_pred_lower.squeeze(),
y_pred_upper.squeeze() - y_pred_mean.squeeze())
# form matrix for a 'null' system (constant band)
X_null = deepcopy(X_for_ucc)
X_null[:,1:] = np.std(y_pred_mean) # can be set to any other constant (no effect on AUUCC)
# create an instance of ucc and fit data
from uq360.metrics.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve as ucc
u = ucc()
u.fit([X_for_ucc, X_null], y_true.squeeze())
return u
def make_sklearn_compatible_scorer(task_type, metric, greater_is_better=True, **kwargs):
"""
Args:
task_type: (str) regression or classification.
metric: (str): choice of metric can be one of these - [aurrrc, ece, auroc, nll, brier, accuracy] for
classification and ["rmse", "nll", "auucc_gain", "picp", "mpiw", "r2"] for regression.
greater_is_better: if False, the scores are negated before returning.
**kwargs: additional arguments specific to some metrics.
Returns:
sklearn compatible scorer function.
"""
from uq360.metrics.classification_metrics import compute_classification_metrics
from uq360.metrics.regression_metrics import compute_regression_metrics
def sklearn_compatible_score(model, X, y_true):
"""
Args:
model: The model being scored. Currently uq360 and sklearn models are supported.
X: Input features.
y_true: ground truth values for the target.
Returns:
Computed score of the model.
"""
from uq360.algorithms.builtinuq import BuiltinUQ
from uq360.algorithms.posthocuq import PostHocUQ
if isinstance(model, BuiltinUQ) or isinstance(model, PostHocUQ):
# uq360 models
if task_type == "classification":
score = compute_classification_metrics(
y_true=y_true,
y_prob=model.predict(X).y_prob,
option=metric,
**kwargs
)[metric]
elif task_type == "regression":
y_mean, y_lower, y_upper = model.predict(X)
score = compute_regression_metrics(
y_true=y_true,
y_mean=y_mean,
y_lower=y_lower,
y_upper=y_upper,
option=metric,
**kwargs
)[metric]
else:
raise NotImplementedError
else:
# sklearn models
if task_type == "classification":
score = compute_classification_metrics(
y_true=y_true,
y_prob=model.predict_proba(X),
option=metric,
**kwargs
)[metric]
else:
if metric in ["rmse", "r2"]:
score = compute_regression_metrics(
y_true=y_true,
y_mean=model.predict(X),
y_lower=None,
y_upper=None,
option=metric,
**kwargs
)[metric]
else:
raise NotImplementedError("{} is not supported for sklearn regression models".format(metric))
if not greater_is_better:
score = -score
return score
return sklearn_compatible_score
class DummySklearnEstimator(ABC):
def __init__(self, num_classes, base_model_prediction_fn):
self.base_model_prediction_fn = base_model_prediction_fn
self.classes_ = [i for i in range(num_classes)]
def fit(self):
pass
def predict_proba(self, X):
return self.base_model_prediction_fn(X)
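# --- Hedged usage sketch (not part of the original module) ---
# Scores a plain sklearn classifier with make_sklearn_compatible_scorer above;
# 'accuracy' is one of the metric choices listed in its docstring.
if __name__ == '__main__':
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    X, y = make_classification(n_samples=200, random_state=0)
    clf = LogisticRegression(max_iter=1000).fit(X, y)
    scorer = make_sklearn_compatible_scorer('classification', 'accuracy')
    print('accuracy:', scorer(clf, X, y))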
<s> # Adapted from https://github.com/Trusted-AI/AIX360/blob/master/aix360/datasets/meps_dataset.py
# Utilization target is kept as a continuous target.
import os
import pandas as pd
def default_preprocessing(df):
"""
1. Create a new column, RACE, that is 'White' if RACEV2X = 1 and HISPANX = 2 (i.e. non-Hispanic White)
and 'non-White' otherwise
2. Restrict to Panel 19
3. RENAME all columns that are PANEL/ROUND SPECIFIC
4. Drop rows based on certain values of individual features that correspond to missing/unknown - generally < -1
5. Compute UTILIZATION.
"""
def race(row):
if ((row['HISPANX'] == 2) and (row['RACEV2X'] == 1)): #non-Hispanic Whites are marked as WHITE; all others as NON-WHITE
return 'White'
return 'Non-White'
df['RACEV2X'] = df.apply(lambda row: race(row), axis=1)
df = df.rename(columns = {'RACEV2X' : 'RACE'})
df = df[df['PANEL'] == 19]
# RENAME COLUMNS
df = df.rename(columns = {'FTSTU53X' : 'FTSTU', 'ACTDTY53' : 'ACTDTY', 'HONRDC53' : 'HONRDC', 'RTHLTH53' : 'RTHLTH',
'MNHLTH53' : 'MNHLTH', 'CHBRON53' : 'CHBRON', 'JTPAIN53' : 'JTPAIN', 'PREGNT53' : 'PREGNT',
'WLKLIM53' : 'WLKLIM', 'ACTLIM53' : 'ACTLIM', 'SOCLIM53' : 'SOCLIM', 'COGLIM53' : 'COGLIM',
'EMPST53' : 'EMPST', 'REGION53' : 'REGION', 'MARRY53X' : 'MARRY', 'AGE53X' : 'AGE',
'POVCAT15' : 'POVCAT', 'INSCOV15' : 'INSCOV'})
df = df[df['REGION'] >= 0] # remove values -1
df = df[df['AGE'] >= 0] # remove values -1
df = df[df['MARRY'] >= 0] # remove values -1, -7, -8, -9
df = df[df['ASTHDX'] >= 0] # remove values -1, -7, -8, -9
df = df[(df[['FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX','EDUCYR','HIDEG',
'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX',
'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM',
'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42',
'PHQ242','EMPST','POVCAT','INSCOV']] >= -1).all(1)] #for all other categorical features, remove values < -1
def utilization(row):
return row['OBTOTV15'] + row['OPTOTV15'] + row['ERTOT15'] + row['IPNGTD15'] + row['HHTOTD15']
df['TOTEXP15'] = df.apply(lambda row: utilization(row), axis=1)
df = df.rename(columns = {'TOTEXP15' : 'UTILIZATION'})
df = df[['REGION','AGE','SEX','RACE','MARRY',
'FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX',
'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX',
'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM',
'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42','PCS42',
'MCS42','K6SUM42','PHQ242','EMPST','POVCAT','INSCOV','UTILIZATION','PERWT15F']]
return df
class MEPSDataset():
"""
The Medical Expenditure Panel Survey (MEPS) [#]_ data consists of large scale surveys of families and individuals,
medical providers, and employers, and collects data on health services used, costs & frequency of services,
demographics, health status and conditions, etc., of the respondents.
This specific dataset contains MEPS survey data for calendar year 2015 obtained in rounds 3, 4, and 5 of Panel 19,
and rounds 1, 2, and 3 of Panel 20.
See :file:`uq360/datasets/data/meps_data/README.md` for more details on the dataset and instructions on downloading/processing the data.
References:
.. [#] `Medical Expenditure Panel Survey data <https://meps.ahrq.gov/mepsweb/>`_
"""
def __init__(self, custom_preprocessing=default_preprocessing, dirpath=None):
self._dirpath = dirpath
if not self._dirpath:
self._dirpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'meps_data')
self._filepath = os.path.join(self._dirpath, 'h181.csv')
try:
df = pd.read_csv(self._filepath, sep=',', na_values=[])
except IOError as err:
print("IOError: {}".format(err))
print("To use this class, please place the heloc_dataset.csv:")
print("file, as-is, in the folder:")
print("\\n\\t{}\\n".format(os.path.abspath(os.path.join(
os.path.abspath(__file__), 'data', 'meps_data'))))
import sys
sys.exit(1)
if custom_preprocessing:
self._data = custom_preprocessing(df)
def data(self):
return self._data<s> from .meps_dataset import MEPSDataset
<s><s> import abc
import sys
# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta(str('ABC'), (), {})
class BuiltinUQ(ABC):
""" BuiltinUQ is the base class for any algorithm that has UQ built into it.
"""
def __init__(self, *argv, **kwargs):
""" Initialize a BuiltinUQ object.
"""
@abc.abstractmethod
def fit(self, *argv, **kwargs):
""" Learn the UQ related parameters..
"""
raise NotImplementedError
@abc.abstractmethod
def predict(self, *argv, **kwargs):
|
""" Method to obtain the predicitve uncertainty, this can return the total, epistemic and/or aleatoric
uncertainty in the predictions.
"""
raise NotImplementedError
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
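# --- Hedged illustration (not part of the original module) ---
# Minimal subclass showing the contract BuiltinUQ imposes: concrete fit() and
# predict() make the class instantiable. The class and attribute names below
# are illustrative only.
if __name__ == '__main__':
    import numpy as np

    class ConstantIntervalUQ(BuiltinUQ):
        def fit(self, X, y):
            self.mu_ = float(np.mean(y))
            self.sigma_ = float(np.std(y))
            return self
        def predict(self, X):
            mean = np.full(len(X), self.mu_)
            return mean, mean - 2 * self.sigma_, mean + 2 * self.sigma_

    m = ConstantIntervalUQ().fit(np.zeros((10, 1)), np.random.randn(10))
    print(m.predict(np.zeros((3, 1))))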
<s><s> import abc
import sys
# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta(str('ABC'), (), {})
class PostHocUQ(ABC):
""" PostHocUQ is the base class for any algorithm that quantifies uncertainty of a pre-trained model.
"""
def __init__(self, *argv, **kwargs):
""" Initialize a BuiltinUQ object.
"""
@abc.abstractmethod
def _process_pretrained_model(self, *argv, **kwargs):
""" Method to process the pretrained model that requires UQ.
"""
raise NotImplementedError
@abc.abstractmethod
def predict(self, *argv, **kwargs):
""" Method to obtain the predicitve uncertainty, this can return the total, epistemic and/or aleatoric
uncertainty in the predictions.
"""
raise NotImplementedError
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
def get_params(self):
"""
This method should not take any arguments and returns a dict of the __init__ parameters.
"""
raise NotImplementedError
<s> from collections import namedtuple
import numpy as np
import torch
from scipy.stats import norm
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from uq360.algorithms.builtinuq import BuiltinUQ
from uq360.models.heteroscedastic_mlp import GaussianNoiseMLPNet as _MLPNet
np.random.seed(42)
torch.manual_seed(42)
class HeteroscedasticRegression(BuiltinUQ):
""" Wrapper for heteroscedastic regression. We learn to predict targets given features,
assuming that the targets are noisy and that the amount of noise varies between data points.
https://en.wikipedia.org/wiki/Heteroscedasticity
"""
def __init__(self, model_type=None, model=None, config=None, device=None, verbose=True):
"""
Args:
model_type: The base model architecture. Currently supported values are [mlp].
mlp modeltype learns a multi-layer perceptron with a heteroscedastic Gaussian likelihood. Both the
mean and variance of the Gaussian are functions of the data point -> N(y_n | mlp_mu(x_n), mlp_var(x_n))
model: (optional) The prediction model. Currently support pytorch models that returns mean and log variance.
config: dictionary containing the config parameters for the model.
device: device used for pytorch models ignored otherwise.
verbose: if True, print statements with the progress are enabled.
"""
super(HeteroscedasticRegression, self).__init__()
self.config = config
self.device = device
self.verbose = verbose
if model_type == "mlp":
self.model_type = model_type
self.model = _MLPNet(
num_features=self.config["num_features"],
num_outputs=self.config["num_outputs"],
num_hidden=self.config["num_hidden"],
)
elif model_type == "custom":
self.model_type = model_type
self.model = model
else:
raise NotImplementedError
def get_params(self, deep=True):
return {"model_type": self.model_type, "config": self.config, "model": self.model,
"device": self.device, "verbose": self.verbose}
def _loss(self, y_true, y_pred_mu, y_pred_log_var):
return torch.mean(0.5 * torch.exp(-y_pred_log_var) * torch.abs(y_true - y_pred_mu) ** 2 +
0.5 * y_pred_log_var)
def fit(self, X, y):
""" Fit the Heteroscedastic Regression model.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
X = torch.from_numpy(X).float().to(self.device)
y = torch.from_numpy(y).float().to(self.device)
dataset_loader = DataLoader(
TensorDataset(X,y),
batch_size=self.config["batch_size"]
)
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.config["lr"])
for epoch in range(self.config["num_epochs"]):
avg_loss = 0.0
for batch_x, batch_y in dataset_loader:
self.model.train()
batch_y_pred_mu, batch_y_pred_log_var = self.model(batch_x)
loss = self.model.loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var)
optimizer.zero_grad()
loss.backward()
optimizer.step()
avg_loss += loss.item()/len(dataset_loader)
if self.verbose:
print("Epoch: {}, loss = {}".format(epoch, avg_loss))
return self
def predict(self, X, return_dists=False):
"""
Obtain predictions for the test points.
In addition to the mean and lower/upper bounds, also returns the full predictive distribution (return_dists=True).
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.
Returns:
namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
dists: list of predictive distribution as `scipy.stats` objects with length n_samples.
Only returned when `return_dists` is True.
"""
self.model.eval()
X = torch.from_numpy(X).float().to(self.device)
dataset_loader = DataLoader(
X,
batch_size=self.config["batch_size"]
)
y_mean_list = []
y_log_var_list = []
for batch_x in dataset_loader:
batch_y_pred_mu, batch_y_pred_log_var = self.model(batch_x)
y_mean_list.append(batch_y_pred_mu.data.cpu().numpy())
y_log_var_list.append(batch_y_pred_log_var.data.cpu().numpy())
y_mean = np.concatenate(y_mean_list)
y_log_var = np.concatenate(y_log_var_list)
y_std = np.sqrt(np.exp(y_log_var))
y_lower = y_mean - 2.0*y_std
y_upper = y_mean + 2.0*y_std
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
if return_dists:
dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_dists',))
res = Result(*res, y_dists=dists)
return res
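# --- Hedged usage sketch (not part of the original module; hyperparameters are illustrative) ---
# Config keys mirror the ones read in __init__/fit(); assumes the imported
# _MLPNet exposes the loss() called inside fit().
if __name__ == '__main__':
    config = {'num_features': 1, 'num_outputs': 1, 'num_hidden': 32,
              'batch_size': 32, 'lr': 1e-3, 'num_epochs': 5}
    reg = HeteroscedasticRegression(model_type='mlp', config=config,
                                    device='cpu', verbose=False)
    X = np.random.randn(128, 1).astype(np.float32)
    y = (2.0 * X + 0.1 * np.random.randn(128, 1)).astype(np.float32)
    res = reg.fit(X, y).predict(X[:5])
    print(res.y_mean.ravel(), res.y_lower.ravel(), res.y_upper.ravel())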
<s> from .heteroscedastic_regression import HeteroscedasticRegression<s> from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from scipy.stats import norm
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from uq360.algorithms.builtinuq import BuiltinUQ
np.random.seed(42)
torch.manual_seed(42)
class _MLPNet_Main(torch.nn.Module):
def __init__(self, num_features, num_outputs, num_hidden):
super(_MLPNet_Main, self).__init__()
self.fc = torch.nn.Linear(num_features, num_hidden)
self.fc_mu = torch.nn.Linear(num_hidden, num_outputs)
self.fc_log_var = torch.nn.Linear(num_hidden, num_outputs)
def forward(self, x):
x = F.relu(self.fc(x))
mu = self.fc_mu(x)
log_var = self.fc_log_var(x)
return mu, log_var
class _MLPNet_Aux(torch.nn.Module):
def __init__(self, num_features, num_outputs, num_hidden):
super(_MLPNet_Aux, self).__init__()
self.fc = torch.nn.Linear(num_features, num_hidden)
self.fc_log_var = torch.nn.Linear(num_hidden, num_outputs)
def forward(self, x):
x = F.relu(self.fc(x))
log_var = self.fc_log_var(x)
return log_var
class AuxiliaryIntervalPredictor(BuiltinUQ):
""" Auxiliary Interval Predictor [1]_ uses an auxiliary model to encourage calibration of the main model.
References:
.. [1] Thiagarajan, J. J., Venkatesh, B., Sattigeri, P., & Bremer, P. T. (2020, April). Building calibrated deep
models via uncertainty matching with auxiliary interval predictors. In Proceedings of the AAAI Conference on
Artificial Intelligence (Vol. 34, No. 04, pp. 6005-6012). https://arxiv.org/abs/1909.04079
"""
def __init__(self, model_type=None, main_model=None, aux_model=None, config=None, device=None, verbose=True):
"""
Args:
model_type: The model type used to build the main model and the auxiliary model. Currently supported values
are [mlp, custom]. `mlp` modeltype learns a mlp neural network using pytorch framework. For `custom` the user
provide `main_model` and `aux_model`.
main_model: (optional) The main prediction model. Currently support pytorch models that return mean and log variance.
aux_model: (optional) The auxiliary prediction model. Currently support pytorch models that return calibrated log variance.
config: dictionary containing the config parameters for the model.
device: device used for pytorch models ignored otherwise.
verbose: if True, print statements with the progress are enabled.
"""
super(AuxiliaryIntervalPredictor, self).__init__()
self.config = config
self.device = device
self.verbose = verbose
if model_type == "mlp":
self.model_type = model_type
self.main_model = _MLPNet_Main(
num_features=self.config["num_features"],
num_outputs=self.config["num_outputs"],
num_hidden=self.config["num_hidden"],
)
self.aux_model = _MLPNet_Aux(
num_features=self.config["num_features"],
num_outputs=self.config["num_outputs"],
num_hidden=self.config["num_hidden"],
)
elif model_type == "custom":
self.model_type = model_type
self.main_model = main_model
self.aux_model = aux_model
else:
raise NotImplementedError
def get_params(self, deep=True):
return {"model_type": self.model_type, "config": self.config, "main_model": self.main_model,
"aux_model": self.aux_model, "device": self.device, "verbose": self.verbose}
def _main_model_loss(self, y_true, y_pred_mu, y_pred_log_var, y_pred_log_var_aux):
r = torch.abs(y_true - y_pred_mu)
# + 0.5 * y_pred_log_var +
loss = torch.mean(0.5 * torch.exp(-y_pred_log_var) * r ** 2) + \\
self.config["lambda_match"] * torch.mean(torch.abs(torch.exp(0.5 * y_pred_log_var) - torch.exp(0.5 * y_pred_log_var_aux)))
return loss
def _aux_model_loss(self, y_true, y_pred_mu, y_pred_log_var_aux):
deltal = deltau = 2.0 * torch.exp(0.5 * y_pred_log_var_aux)
upper = y_pred_mu + deltau
lower = y_pred_mu - deltal
width = upper - lower
r = torch.abs(y_true - y_pred_mu)
emce = torch.mean(torch.sigmoid((y_true - lower) * (upper - y_true) * 100000))
loss_emce = torch.abs(self.config["calibration_alpha"]-emce)
loss_noise = torch.mean(torch.abs(0.5 * width - r))
loss_sharpness = torch.mean(torch.abs(upper - y_true)) + torch.mean(torch.abs(lower - y_true))
#print(emce)
return loss_emce + self.config["lambda_noise"] * loss_noise + self.config["lambda_sharpness"] * loss_sharpness
def fit(self, X, y):
""" Fit the Auxiliary Interval Predictor model.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
X = torch.from_numpy(X).float().to(self.device)
y = torch.from_numpy(y).float().to(self.device)
dataset_loader = DataLoader(
TensorDataset(X,y),
batch_size=self.config["batch_size"]
)
optimizer_main_model = torch.optim.Adam(self.main_model.parameters(), lr=self.config["lr"])
optimizer_aux_model = torch.optim.Adam(self.aux_model.parameters(), lr=self.config["lr"])
for it in range(self.config["num_outer_iters"]):
# Train the main model
for epoch in range(self.config["num_main_iters"]):
avg_mean_model_loss = 0.0
for batch_x, batch_y in dataset_loader:
self.main_model.train()
self.aux_model.eval()
batch_y_pred_log_var_aux = self.aux_model(batch_x)
batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x)
main_loss = self._main_model_loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var, batch_y_pred_log_var_aux)
optimizer_main_model.zero_grad()
main_loss.backward()
optimizer_main_model.step()
avg_mean_model_loss += main_loss.item()/len(dataset_loader)
if self.verbose:
print("Iter: {}, Epoch: {}, main_model_loss = {}".format(it, epoch, avg_mean_model_loss))
# Train the auxiliary model
for epoch in range(self.config["num_aux_iters"]):
avg_aux_model_loss = 0.0
for batch_x, batch_y in dataset_loader:
self.aux_model.train()
self.main_model.eval()
batch_y_pred_log_var_aux = self.aux_model(batch_x)
batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x)
aux_loss = self._aux_model_loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var_aux)
optimizer_aux_model.zero_grad()
aux_loss.backward()
optimizer_aux_model.step()
avg_aux_model_loss += aux_loss.item() / len(dataset_loader)
if self.verbose:
print("Iter: {}, Epoch: {}, aux_model_loss = {}".format(it, epoch, avg_aux_model_loss))
return self
def predict(self, X, return_dists=False):
"""
Obtain predictions for the test points.
In addition to the mean and lower/upper bounds, also returns full predictive distribution (return_dists=True).
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.
Returns:
namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
dists: list of predictive distribution as `scipy.stats` objects with length n_samples.
Only returned when `return_dists` is True.
"""
self.main_model.eval()
X = torch.from_numpy(X).float().to(self.device)
dataset_loader = DataLoader(
X,
batch_size=self.config["batch_size"]
)
y_mean_list = []
y_log_var_list = []
for batch_x in dataset_loader:
batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x)
y_mean_list.append(batch_y_pred_mu.data.cpu().numpy())
y_log_var_list.append(batch_y_pred_log_var.data.cpu().numpy())
y_mean = np.concatenate(y_mean_list)
y_log_var = np.concatenate(y_log_var_list)
y_std = np.sqrt(np.exp(y_log_var))
y_lower = y_mean - 2.0*y_std
y_upper = y_mean + 2.0*y_std
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
if return_dists:
dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_dists',))
res = Result(*res, y_dists=dists)
return res
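# --- Hedged usage sketch (not part of the original module; hyperparameters are illustrative) ---
# Config keys mirror the ones read in fit() and the loss functions above.
if __name__ == '__main__':
    config = {'num_features': 1, 'num_outputs': 1, 'num_hidden': 32,
              'batch_size': 32, 'lr': 1e-3,
              'num_outer_iters': 2, 'num_main_iters': 2, 'num_aux_iters': 2,
              'lambda_match': 0.5, 'calibration_alpha': 0.95,
              'lambda_noise': 0.5, 'lambda_sharpness': 0.5}
    aip = AuxiliaryIntervalPredictor(model_type='mlp', config=config,
                                     device='cpu', verbose=False)
    X = np.random.randn(128, 1).astype(np.float32)
    y = (np.sin(3 * X) + 0.1 * np.random.randn(128, 1)).astype(np.float32)
    res = aip.fit(X, y).predict(X[:5])
    print(res.y_mean.ravel())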
<s> from .auxiliary_interval_predictor import AuxiliaryIntervalPredictor
<s> from .infinitesimal_jackknife import InfinitesimalJackknife
<s> from collections import namedtuple
import numpy as np
from uq360.algorithms.posthocuq import PostHocUQ
class InfinitesimalJackknife(PostHocUQ):
"""
Performs a first order Taylor series expansion around MLE / MAP fit.
Requires the model being probed to be twice differentiable.
"""
def __init__(self, params, gradients, hessian, config):
""" Initialize IJ.
Args:
params: MLE / MAP fit around which uncertainty is sought. d*1
gradients: Per data point gradients, estimated at the MLE / MAP fit. d*n
hessian: Hessian evaluated at the MLE / MAP fit. d*d
"""
super(InfinitesimalJackknife, self).__init__()
self.params_one = params
self.gradients = gradients
self.hessian = hessian
self.d, self.n = gradients.shape
self.dParams_dWeights = -np.linalg.solve(self.hessian, self.gradients)
self.approx_dParams_dWeights = -np.linalg.solve(np.diag(np.diag(self.hessian)), self.gradients)
self.w_one = np.ones([self.n])
self.config = config
def get_params(self, deep=True):
return {"params": self.params, "config": self.config, "gradients": self.gradients,
"hessian": self.hessian}
def _process_pretrained_model(self, *argv, **kwargs):
pass
def get_parameter_uncertainty(self):
if (self.config['resampling_strategy'] == "jackknife") or (self.config['resampling_strategy'] == "jackknife+"):
w_query = np.ones_like(self.w_one)
resampled_params = np.zeros([self.n, self.d])
for i in np.arange(self.n):
w_query[i] = 0
resampled_params[i] = self.ij(w_query)
w_query[i] = 1
return np.cov(resampled_params, rowvar=False), resampled_params
elif self.config['resampling_strategy'] == "bootstrap":
pass
else:
raise NotImplementedError("Only jackknife, jackknife+, and bootstrap resampling strategies are supported")
def predict(self, X, model):
"""
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
model: model object, must implement a set_parameters function
Returns:
namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
n, _ = X.shape
y_all = model.predict(X)
_, d_out = y_all.shape
params_cov, params = self.get_parameter_uncertainty()
if d_out > 1:
print("Quantiles are computed independently for each dimension. May not be accurate.")
y = np.zeros([params.shape[0], n, d_out])
for i in np.arange(params.shape[0]):
model.set_parameters(params[i])
y[i] = model.predict(X)
y_lower = np.quantile(y, q=0.5 * self.config['alpha'], axis=0)
y_upper = np.quantile(y, q=(1. - 0.5 * self.config['alpha']), axis=0)
y_mean = y.mean(axis=0)
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
return res
def ij(self, w_query):
"""
Args:
w_query: A n*1 vector to query parameters at.
Return:
new parameters at w_query
"""
assert w_query.shape[0] == self.n
return self.params_one + self.dParams_dWeights @ (w_query-self.w_one).T
def approx_ij(self, w_query):
"""
Args:
w_query: A n*1 vector to query parameters at.
Return:
new parameters at w_query
"""
assert w_query.shape[0] == self.n
return self.params_one + self.approx_dParams_dWeights @ (w_query-self.w_one).T<s> import copy
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.utils.data as data_utils
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
from uq360.algorithms.builtinuq import BuiltinUQ
from uq360.models.bayesian_neural_networks.bnn_models import horseshoe_mlp, bayesian_mlp
class BnnRegression(BuiltinUQ):
"""
Variationally trained BNNs with Gaussian and Horseshoe [6]_ priors for regression.
References:
.. [6] Ghosh, Soumya, Jiayu Yao, and Finale Doshi-Velez. "Structured variational learning of Bayesian neural
networks with horseshoe priors." International Conference on Machine Learning. PMLR, 2018.
"""
def __init__(self, config, prior="Gaussian"):
"""
Args:
config: a dictionary specifying network and learning hyperparameters.
prior: BNN priors specified as a string. Supported priors are Gaussian, Hshoe, RegHshoe
"""
super(BnnRegression, self).__init__()
self.config = config
self.prior = prior
if prior == "Gaussian":
self.net = bayesian_mlp.BayesianRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'])
self.config['use_reg_hshoe'] = None
elif prior == "Hshoe":
self.net = horseshoe_mlp.HshoeRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'],
hshoe_scale=config['hshoe_scale'])
self.config['use_reg_hshoe'] = False
elif prior == "RegHshoe":
self.net = horseshoe_mlp.HshoeRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'],
hshoe_scale=config['hshoe_scale'],
use_reg_hshoe=config['use_reg_hshoe'])
self.config['use_reg_hshoe'] = True
else:
raise NotImplementedError("'prior' must be a string. It can be one of Gaussian, Hshoe, RegHshoe")
def get_params(self, deep=True):
return {"prior": self.prior, "config": self.config}
def fit(self, X, y):
""" Fit the BNN regression model.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
torch.manual_seed(1234)
optimizer = torch.optim.Adam(self.net.parameters(), lr=self.config['step_size'])
neg_elbo = torch.zeros([self.config['num_epochs'], 1])
params_store = {}
for epoch in range(self.config['num_epochs']):
loss = self.net.neg_elbo(num_batches=1, x=X, y=y.float().unsqueeze(dim=1)) / X.shape[0]
optimizer.zero_grad()
loss.backward()
optimizer.step()
if hasattr(self.net, 'fixed_point_updates'):
# for hshoe or regularized hshoe nets
self.net.fixed_point_updates()
neg_elbo[epoch] = loss.item()
if (epoch + 1) % 10 == 0:
# print ((net.noise_layer.bhat/net.noise_layer.ahat).data.numpy()[0])
print('Epoch[{}/{}], neg elbo: {:.6f}, noise var: {:.6f}'
.format(epoch + 1, self.config['num_epochs'], neg_elbo[epoch].item() / X.shape[0],
self.net.get_noise_var()))
params_store[epoch] = copy.deepcopy(self.net.state_dict()) # for small nets we can just store all.
best_model_id = neg_elbo.argmin() # loss_val_store.argmin() #
self.net.load_state_dict(params_store[best_model_id.item()])
return self
def predict(self, X, mc_samples=100, return_dists=False, return_epistemic=True, return_epistemic_dists=False):
"""
Obtain predictions for the test points.
In addition to the mean and lower/upper bounds, also returns epistemic uncertainty (return_epistemic=True)
and full predictive distribution (return_dists=True).
Args:
|
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
mc_samples: Number of Monte-Carlo samples.
return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.
return_epistemic: if True, the epistemic upper and lower bounds are returned.
return_epistemic_dists: If True, the epistemic distribution for each instance using scipy distributions
is returned.
Returns:
namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
y_lower_epistemic: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of epistemic component of the predictive distribution of the test points.
Only returned when `return_epistemic` is True.
y_upper_epistemic: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of epistemic component of the predictive distribution of the test points.
Only returned when `return_epistemic` is True.
dists: list of predictive distribution as `scipy.stats` objects with length n_samples.
Only returned when `return_dists` is True.
"""
epistemic_out = np.zeros([mc_samples, X.shape[0]])
total_out = np.zeros([mc_samples, X.shape[0]])
for s in np.arange(mc_samples):
pred = self.net(X).data.numpy().ravel()
epistemic_out[s] = pred
total_out[s] = pred + np.sqrt(self.net.get_noise_var()) * np.random.randn(pred.shape[0])
y_total_std = np.std(total_out, axis=0)
y_epi_std = np.std(epistemic_out, axis=0)
y_mean = np.mean(total_out, axis=0)
y_lower = y_mean - 2 * y_total_std
y_upper = y_mean + 2 * y_total_std
y_epi_lower = y_mean - 2 * y_epi_std
y_epi_upper = y_mean + 2 * y_epi_std
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
if return_epistemic:
Result = namedtuple('res', Result._fields + ('lower_epistemic', 'upper_epistemic',))
res = Result(*res, lower_epistemic=y_epi_lower, upper_epistemic=y_epi_upper)
if return_dists:
dists = [norm(loc=y_mean[i], scale=y_total_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_dists',))
res = Result(*res, y_dists=dists)
if return_epistemic_dists:
epi_dists = [norm(loc=y_mean[i], scale=y_epi_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_epistemic_dists',))
res = Result(*res, y_epistemic_dists=epi_dists)
return res
class BnnClassification(BuiltinUQ):
"""
Variationally trained BNNs with Gaussian and Horseshoe [6]_ priors for classification.
"""
def __init__(self, config, prior="Gaussian", device=None):
"""
Args:
config: a dictionary specifying network and learning hyperparameters.
prior: BNN priors specified as a string. Supported priors are Gaussian, Hshoe, RegHshoe
"""
super(BnnClassification, self).__init__()
self.config = config
self.prior = prior  # stored so get_params() can report it
self.device = device
if prior == "Gaussian":
self.net = bayesian_mlp.BayesianClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'])
self.config['use_reg_hshoe'] = None
elif prior == "Hshoe":
self.net = horseshoe_mlp.HshoeClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'],
hshoe_scale=config['hshoe_scale'])
self.config['use_reg_hshoe'] = False
elif prior == "RegHshoe":
self.net = horseshoe_mlp.HshoeClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'],
hshoe_scale=config['hshoe_scale'],
use_reg_hshoe=config['use_reg_hshoe'])
self.config['use_reg_hshoe'] = True
else:
raise NotImplementedError("'prior' must be one of 'Gaussian', 'Hshoe' or 'RegHshoe'; got %r" % prior)
if "batch_size" not in self.config:
self.config["batch_size"] = 50
self.net = self.net.to(device)
def get_params(self, deep=True):
return {"prior": self.prior, "config": self.config, "device": self.device}
def fit(self, X=None, y=None, train_loader=None):
""" Fits BNN regression model.
Args:
X: array-like of shape (n_samples, n_features) or (n_samples, n_classes).
Features vectors of the training data or the probability scores from the base model.
Ignored if train_loader is not None.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Ignored if train_loader is not None.
train_loader: pytorch train_loader object.
Returns:
self
"""
if train_loader is None:
train = data_utils.TensorDataset(torch.Tensor(X), torch.Tensor(y.values).long())
train_loader = data_utils.DataLoader(train, batch_size=self.config['batch_size'], shuffle=True)
torch.manual_seed(1234)
optimizer = torch.optim.Adam(self.net.parameters(), lr=self.config['step_size'])
neg_elbo = torch.zeros([self.config['num_epochs'], 1])
params_store = {}
for epoch in range(self.config['num_epochs']):
avg_loss = 0.0
for batch_x, batch_y in train_loader:
loss = self.net.neg_elbo(num_batches=len(train_loader), x=batch_x, y=batch_y) / batch_x.size(0)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if hasattr(self.net, 'fixed_point_updates'):
# for hshoe or regularized hshoe nets
self.net.fixed_point_updates()
avg_loss += loss.item()
neg_elbo[epoch] = avg_loss / len(train_loader)
if (epoch + 1) % 10 == 0:
# print ((net.noise_layer.bhat/net.noise_layer.ahat).data.numpy()[0])
print('Epoch[{}/{}], neg elbo: {:.6f}'
.format(epoch + 1, self.config['num_epochs'], neg_elbo[epoch].item()))
params_store[epoch] = copy.deepcopy(self.net.state_dict()) # for small nets we can just store all.
best_model_id = neg_elbo.argmin() # loss_val_store.argmin() #
self.net.load_state_dict(params_store[best_model_id.item()])
return self
def predict(self, X, mc_samples=100):
"""
Obtain calibrated predictions for the test points.
Args:
X: array-like of shape (n_samples, n_features) or (n_samples, n_classes).
Features vectors of the training data or the probability scores from the base model.
mc_samples: Number of Monte-Carlo samples.
Returns:
namedtuple: A namedtuple that holds
y_pred: ndarray of shape (n_samples,)
Predicted labels of the test points.
y_prob: ndarray of shape (n_samples, n_classes)
Predicted probability scores of the classes.
y_prob_var: ndarray of shape (n_samples,)
Variance of the prediction on the test points.
y_prob_samples: ndarray of shape (mc_samples, n_samples, n_classes)
Samples from the predictive distribution.
"""
X = torch.Tensor(X)
y_prob_samples = [F.softmax(self.net(X), dim=1).detach().numpy() for _ in np.arange(mc_samples)]
y_prob_samples_stacked = np.stack(y_prob_samples)
prob_mean = np.mean(y_prob_samples_stacked, 0)
prob_var = np.std(y_prob_samples_stacked, 0) ** 2
if len(np.shape(prob_mean)) == 1:
y_pred_labels = prob_mean > 0.5
else:
y_pred_labels = np.argmax(prob_mean, axis=1)
Result = namedtuple('res', ['y_pred', 'y_prob', 'y_prob_var', 'y_prob_samples'])
res = Result(y_pred_labels, prob_mean, prob_var, y_prob_samples)
return res
<s><s> import inspect
from collections import namedtuple
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.exceptions import NotFittedError
from uq360.algorithms.posthocuq import PostHocUQ
class BlackboxMetamodelClassification(PostHocUQ):
""" Extracts confidence scores from black-box classification models using a meta-model [4]_ .
References:
.. [4] Chen, Tongfei, et al. "Confidence scoring using whitebox meta-models with linear classifier probes."
The 22nd International Conference on Artificial Intelligence and Statistics. PMLR, 2019.
"""
def _create_named_model(self, mdltype, config):
""" Instantiates a model by name passed in 'mdltype'.
Args:
mdltype: string with name (must be supported)
config: dict with args passed in the instantiation call
Returns:
mdl instance
"""
assert (isinstance(mdltype, str))
if mdltype == 'lr':
mdl = LogisticRegression(**config)
elif mdltype == 'gbm':
mdl = GradientBoostingClassifier(**config)
else:
raise NotImplementedError("ERROR: Requested model type unknown: \\"%s\\"" % mdltype)
return mdl
def _get_model_instance(self, model, config):
""" Returns an instance of a model based on (a) a desired name or (b) passed in class, or
(c) passed in instance.
:param model: string, class, or instance. Class and instance must have certain methods callable.
:param config: dict with args passed in during the instantiation
:return: model instance
"""
assert (model is not None and config is not None)
if isinstance(model, str): # 'model' is a name, create it
mdl = self._create_named_model(model, config)
elif inspect.isclass(model): # 'model' is a class, instantiate it
mdl = model(**config)
else: # 'model' is an instance, register it
mdl = model
if not all([hasattr(mdl, key) and callable(getattr(mdl, key)) for key in self.callable_keys]):
raise ValueError("ERROR: Passed model/method failed the interface test. Methods required: %s" %
','.join(self.callable_keys))
return mdl
def __init__(self, base_model=None, meta_model=None, base_config=None, meta_config=None, random_seed=42):
"""
:param base_model: Base model. Can be:
(1) None (default mdl will be set up),
(2) Named model (e.g., logistic regression 'lr' or gradient boosting machine 'gbm'),
(3) Base model class declaration (e.g., sklearn.linear_model.LogisticRegression). Will instantiate.
(4) Model instance (instantiated outside). Will be re-used. Must have certain callable methods.
Note: user-supplied classes and models must have certain callable methods ('predict', 'fit')
and be capable of raising NotFittedError.
:param meta_model: Meta model. Same values possible as with 'base_model'
:param base_config: None or a params dict to be passed to 'base_model' at instantiation
:param meta_config: None or a params dict to be passed to 'meta_model' at instantiation
:param random_seed: seed used in the various pipeline steps
"""
super().__init__()
self.random_seed = random_seed
self.callable_keys = ['predict', 'fit'] # required methods - must be present in models passed in
self.base_model_default = 'gbm'
self.meta_model_default = 'lr'
self.base_config_default = {'n_estimators': 300, 'max_depth': 10,
'learning_rate': 0.001, 'min_samples_leaf': 10, 'min_samples_split': 10,
'random_state': self.random_seed}
self.meta_config_default = {'penalty': 'l1', 'C': 1, 'solver': 'liblinear', 'random_state': self.random_seed}
self.base_config = base_config if base_config is not None else self.base_config_default
self.meta_config = meta_config if meta_config is not None else self.meta_config_default
self.base_model = None
self.meta_model = None
self.base_model = self._get_model_instance(base_model if base_model is not None else self.base_model_default,
self.base_config)
self.meta_model = self._get_model_instance(meta_model if meta_model is not None else self.meta_model_default,
self.meta_config)
def get_params(self, deep=True):
return {"base_model": self.base_model, "meta_model": self.meta_model, "base_config": self.base_config,
"meta_config": self.meta_config, "random_seed": self.random_seed}
def _process_pretrained_model(self, X, y_hat_proba):
"""
Given the original input features and the base output probabilities, generate input features
to train a meta model. Current implementation copies all input features and appends.
:param X: numpy [nsamples, dim]
:param y_hat_proba: [nsamples, nclasses]
:return: array with new features [nsamples, newdim]
"""
assert (len(y_hat_proba.shape) == 2)
assert (X.shape[0] == y_hat_proba.shape[0])
# sort the probs sample by sample
faux1 = np.sort(y_hat_proba, axis=-1)
# add delta between top and second candidate
faux2 = np.expand_dims(faux1[:, -1] - faux1[:, -2], axis=-1)
return np.hstack([X, faux1, faux2])
def fit(self, X, y, meta_fraction=0.2, randomize_samples=True, base_is_prefitted=False,
meta_train_data=(None, None)):
"""
Fit base and meta models.
:param X: input to the base model,
array-like of shape (n_samples, n_features).
Features vectors of the training data.
:param y: ground truth for the base model,
array-like of shape (n_samples,)
:param meta_fraction: float in [0,1] - a fractional size of the partition carved out to train the meta model
(complement will be used to train the base model)
:param randomize_samples: use shuffling when creating partitions
:param base_is_prefitted: Setting True will skip fitting the base model (useful for base models that have been
instantiated outside/by the user and are already fitted).
:param meta_train_data: User supplied data to train the meta model. Note that this option should only be used
with 'base_is_prefitted'==True. Pass a tuple meta_train_data=(X_meta, y_meta) to activate.
Note that (X,y,meta_fraction, randomize_samples) will be ignored in this mode.
:return: self
"""
X = np.asarray(X)
y = np.asarray(y)
assert (len(meta_train_data) == 2)
if meta_train_data[0] is None:
X_base, X_meta, y_base, y_meta = train_test_split(X, y, shuffle=randomize_samples, test_size=meta_fraction,
random_state=self.random_seed)
else:
if not base_is_prefitted:
raise ValueError("ERROR: fit(): base model must be pre-fitted to use the 'meta_train_data' option")
X_base = y_base = None
X_meta = meta_train_data[0]
y_meta = meta_train_data[1]
# fit the base model
if not base_is_prefitted:
self.base_model.fit(X_base, y_base)
# get input for the meta model from the base
try:
y_hat_meta_proba = self.base_model.predict_proba(X_meta)
# determine correct-incorrect outcome - these are targets for the meta model trainer
# y_hat_meta_targets = np.asarray((y_meta == np.argmax(y_hat_meta_proba, axis=-1)), dtype=np.int) -- Fix for python 3.8.11 update (in 2.9.0.8)
y_hat_meta_targets = np.asarray((y_meta == np.argmax(y_hat_meta_proba, axis=-1)), dtype=int)
except NotFittedError as e:
raise RuntimeError("ERROR: fit(): The base model appears not pre-fitted (%s)" % repr(e))
# get input features for meta training
X_meta_in = self._process_pretrained_model(X_meta, y_hat_meta_proba)
# train meta model to predict 'correct' vs. 'incorrect' of the base
self.meta_model.fit(X_meta_in, y_hat_meta_targets)
return self
def predict(self, X):
"""
Generate a base prediction along with uncertainty/confidence for data X.
:param X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
:return: namedtuple: A namedtuple that holds
y_pred: ndarray of shape (n_samples,)
Predicted labels of the test points.
y_score: ndarray of shape (n_samples,)
Confidence score of the test points.
"""
y_hat_proba = self.base_model.predict_proba(X)
y_hat = np.argmax(y_hat_proba, axis=-1)
X_meta_in = self._process_pretrained_model(X, y_hat_proba)
z_hat = self.meta_model.predict_proba(X_meta_in)
index_of_class_1 = np.where(self.meta_model.classes_ == 1)[0][0] # class 1 corresponds to probab of positive/correct outcome
Result = namedtuple('res', ['y_pred', 'y_score'])
res = Result(y_hat, z_hat[:, index_of_class_1])
return res
<s> from .blackbox_metamodel_regression import BlackboxMetamodelRegression
from .blackbox_metamodel_classification import BlackboxMetamodelClassification
<s> import inspect
from collections import namedtuple
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import train_test_split
from sklearn.exceptions import NotFittedError
from uq360.algorithms.posthocuq import PostHocUQ
class BlackboxMetamodelRegression(PostHocUQ):
""" Extracts confidence scores from black-box regression models using a meta-model [2]_ .
References:
.. [2] Chen, Tongfei, et al. Confidence scoring using whitebox meta-models with linear classifier probes.
The 22nd International Conference on Artificial Intelligence and Statistics. PMLR, 2019.
"""
def _create_named_model(self, mdltype, config):
"""
Instantiates a model by name passed in 'mdltype'
:param mdltype: string with name (must be supported)
:param config: dict with args passed in the instantiation call
:return: mdl instance
"""
assert (isinstance(mdltype, str))
if mdltype == 'gbr':
mdl = GradientBoostingRegressor(**config)
else:
raise NotImplementedError("ERROR: Requested model type unknown: \\"%s\\"" % mdltype)
return mdl
def _get_model_instance(self, model, config):
"""
Returns an instance of a model based on (a) a desired name or (b) passed in class, or
(c) passed in instance
:param model: string, class, or instance. Class and instance must have certain methods callable.
:param config: dict with args passed in during the instantiation
:return: model instance
"""
assert (model is not None and config is not None)
if isinstance(model, str): # 'model' is a name, create it
mdl = self._create_named_model(model, config)
elif inspect.isclass(model): # 'model' is a class, instantiate it
mdl = model(**config)
else: # 'model' is an instance, register it
mdl = model
if not all([hasattr(mdl, key) and callable(getattr(mdl, key)) for key in self.callable_keys]):
raise ValueError("ERROR: Passed model/method failed the interface test. Methods required: %s" %
','.join(self.callable_keys))
return mdl
def __init__(self, base_model=None, meta_model=None, base_config=None, meta_config=None, random_seed=42):
"""
:param base_model: Base model. Can be:
(1) None (default mdl will be set up),
(2) Named model (e.g., 'gbr'),
(3) Base model class declaration (e.g., sklearn.linear_model.LinearRegression). Will instantiate.
(4) Model instance (instantiated outside). Will be re-used. Must have required callable methods.
Note: user-supplied classes and models must have certain callable methods ('predict', 'fit')
and be capable of raising NotFittedError.
:param meta_model: Meta model. Same values possible as with 'base_model'
:param base_config: None or a params dict to be passed to 'base_model' at instantiation
:param meta_config: None or a params dict to be passed to 'meta_model' at instantiation
:param random_seed: seed used in the various pipeline steps
"""
super().__init__()
self.random_seed = random_seed
self.callable_keys = ['predict', 'fit'] # required methods - must be present in models passed in
self.base_model_default = 'gbr'
self.meta_model_default = 'gbr'
self.base_config_default = {'loss': 'ls', 'n_estimators': 300, 'max_depth': 10, 'learning_rate': 0.001,
'min_samples_leaf': 10, 'min_samples_split': 10, 'random_state': self.random_seed}
self.meta_config_default = {'loss': 'quantile', 'alpha': 0.95, 'n_estimators': 300, 'max_depth': 10,
'learning_rate': 0.001, 'min_samples_leaf': 10, 'min_samples_split': 10,
'random_state': self.random_seed}
self.base_config = base_config if base_config is not None else self.base_config_default
self.meta_config = meta_config if meta_config is not None else self.meta_config_default
self.base_model = None
self.meta_model = None
self.base_model = self._get_model_instance(base_model if base_model is not None else self.base_model_default,
self.base_config)
self.meta_model = self._get_model_instance(meta_model if meta_model is not None else self.meta_model_default,
self.meta_config)
def get_params(self, deep=True):
return {"base_model": self.base_model, "meta_model": self.meta_model, "base_config": self.base_config,
"meta_config": self.meta_config, "random_seed": self.random_seed}
def fit(self, X, y, meta_fraction=0.2, randomize_samples=True, base_is_prefitted=False,
meta_train_data=(None, None)):
"""
Fit base and meta models.
:param X: input to the base model
:param y: ground truth for the base model
:param meta_fraction: float in [0,1] - a fractional size of the partition carved out to train the meta model
(complement will be used to train the base model)
:param randomize_samples: use shuffling when creating partitions
:param base_is_prefitted: Setting True will skip fitting the base model (useful for base models that have been
instantiated outside/by the user and are already fitted).
:param meta_train_data: User supplied data to train the meta model. Note that this option should only be used
with 'base_is_prefitted'==True. Pass a tuple meta_train_data=(X_meta, y_meta) to activate.
Note that (X,y,meta_fraction, randomize_samples) will be ignored in this mode.
:return: self
"""
X = np.asarray(X)
y = np.asarray(y)
assert(len(meta_train_data)==2)
if meta_train_data[0] is None:
X_base, X_meta, y_base, y_meta = train_test_split(X, y, shuffle=randomize_samples, test_size=meta_fraction,
random_state=self.random_seed)
else:
if not base_is_prefitted:
raise ValueError("ERROR: fit(): base model must be pre-fitted to use the 'meta_train_data' option")
X_base = y_base = None
X_meta = meta_train_data[0]
y_meta = meta_train_data[1]
# fit the base model
if not base_is_prefitted:
self.base_model.fit(X_base, y_base)
# get input for the meta model from the base
try:
y_hat_meta = self.base_model.predict(X_meta)
except NotFittedError as e:
raise RuntimeError("ERROR: fit(): The base model appears not pre-fitted (%s)" % repr(e))
# used base input and output as meta input
X_meta_in = self._process_pretrained_model(X_meta, y_hat_meta)
# train meta model to predict abs diff
self.meta_model.fit(X_meta_in, np.abs(y_hat_meta - y_meta))
return self
def _process_pretrained_model(self, X, y_hat):
"""
Given the original input features and the base output probabilities, generate input features
to train a meta model. Current implementation copies all input features and appends.
:param X: numpy [nsamples, dim]
:param y_hat: [nsamples,]
:return: array with new features [nsamples, newdim]
"""
y_hat_meta_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat
X_meta_in = np.hstack([X, y_hat_meta_prime])
return X_meta_in
def predict(self, X):
"""
Generate prediction and uncertainty bounds for data X.
:param X: input features
:return: namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
y_hat = self.base_model.predict(X)
y_hat_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat
X_meta_in = np.hstack([X, y_hat_prime])
z_hat = self.meta_model.predict(X_meta_in)
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_hat, y_hat - z_hat, y_hat + z_hat)
return res
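# --- Hedged usage sketch (not part of the library) --------------------------
# Default 'gbr' base and quantile-loss 'gbr' meta model on a noisy sine curve.
# The default base config uses loss='ls', so this assumes an sklearn version
# that still accepts that alias for squared error.
if __name__ == "__main__":
    _rng = np.random.RandomState(0)
    _X = _rng.uniform(-3, 3, size=(400, 1))
    _y = np.sin(_X[:, 0]) + 0.2 * _rng.randn(400)
    _uq = BlackboxMetamodelRegression().fit(_X, _y)
    _res = _uq.predict(_X[:5])
    print(_res.y_mean, _res.y_lower, _res.y_upper)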
<s> from .quantile_regression import QuantileRegression
<s> from collections import namedtuple
from sklearn.ensemble import GradientBoostingRegressor
from uq360.algorithms.builtinuq import BuiltinUQ
class QuantileRegression(BuiltinUQ):
"""Quantile Regression uses quantile loss and learns two separate models for the upper and lower quantile
to obtain the prediction intervals.
"""
def __init__(self, model_type="gbr", config=None):
"""
Args:
model_type: The base model used for predicting a quantile. Currently supported values are [gbr].
gbr is sklearn GradientBoostingRegressor.
config: dictionary containing the config parameters for the model.
"""
super().__init__()
if config is not None:
self.config = config
else:
self.config = {}
if "alpha" not in self.config:
self.config["alpha"] = 0.95
if model_type == "gbr":
self.model_type = model_type
self.model_mean = GradientBoostingRegressor(
loss='ls',
n_estimators=self.config["n_estimators"],
max_depth=self.config["max_depth"],
learning_rate=self.config["learning_rate"],
min_samples_leaf=self.config["min_samples_leaf"],
min_samples_split=self.config["min_samples_split"]
)
self.model_upper = GradientBoostingRegressor(
loss='quantile',
alpha=self.config["alpha"],
n_estimators=self.config["n_estimators"],
max_depth=self.config["max_depth"],
learning_rate=self.config["learning_rate"],
min_samples_leaf=self.config["min_samples_leaf"],
min_samples_split=self.config["min_samples_split"]
)
self.model_lower = GradientBoostingRegressor(
loss='quantile',
alpha=1.0 - self.config["alpha"],
n_estimators=self.config["n_estimators"],
max_depth=self.config["max_depth"],
learning_rate=self.config["learning_rate"],
min_samples_leaf=self.config["min_samples_leaf"],
min_samples_split=self.config["min_samples_split"])
else:
raise NotImplementedError
def get_params(self, deep=True):
return {"model_type": self.model_type, "config": self.config}
def fit(self, X, y):
""" Fit the Quantile Regression model.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
self.model_mean.fit(X, y)
self.model_lower.fit(X, y)
self.model_upper.fit(X, y)
return self
def predict(self, X):
"""
Obtain predictions for the test points.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
Returns:
namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
y_mean = self.model_mean.predict(X)
y_lower = self.model_lower.predict(X)
y_upper = self.model_upper.predict(X)
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
return res
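# --- Hedged usage sketch (not part of the library) --------------------------
# The constructor reads all GBR hyperparameters from `config` (only `alpha`
# has a default), so a complete config dict must be supplied; the values here
# are illustrative assumptions. As elsewhere, loss='ls' assumes an sklearn
# version that still accepts that alias.
if __name__ == "__main__":
    import numpy as np
    _rng = np.random.RandomState(0)
    _X = _rng.uniform(-3, 3, size=(300, 1))
    _y = np.sin(_X[:, 0]) + 0.2 * _rng.randn(300)
    _config = {"n_estimators": 100, "max_depth": 3, "learning_rate": 0.1,
               "min_samples_leaf": 5, "min_samples_split": 10}
    _qr = QuantileRegression(model_type="gbr", config=_config).fit(_X, _y)
    _res = _qr.predict(_X[:5])
    print(_res.y_mean, _res.y_lower, _res.y_upper)  # central interval for alpha=0.95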
<s> from collections import namedtuple
import botorch
import gpytorch
import numpy as np
import torch
from botorch.models import SingleTaskGP
from botorch.utils.transforms import normalize
from gpytorch.constraints import GreaterThan
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
from uq360.algorithms.builtinuq import BuiltinUQ
np.random.seed(42)
torch.manual_seed(42)
class HomoscedasticGPRegression(BuiltinUQ):
""" A wrapper around Botorch SingleTask Gaussian Process Regression [1]_ with homoscedastic noise.
References:
.. [1] https://botorch.org/api/models.html#singletaskgp
"""
def __init__(self,
kernel=gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel()),
likelihood=None,
config=None):
"""
Args:
kernel: gpytorch kernel function with default set to `RBFKernel` with output scale.
likelihood: gpytorch likelihood function with default set to `GaussianLikelihood`.
config: dictionary containing the config parameters for the model.
"""
super().__init__()
self.config = config
self.kernel = kernel
self.likelihood = likelihood
self.model = None
self.scaler = StandardScaler()
self.X_bounds = None
def get_params(self, deep=True):
return {"kernel": self.kernel, "likelihood": self.likelihood, "config": self.config}
def fit(self, X, y, **kwargs):
"""
Fit the GP Regression model.
Additional arguments relevant for SingleTaskGP fitting can be passed to this function.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
**kwargs: Additional arguments relevant for SingleTaskGP fitting.
Returns:
self
"""
y = self.scaler.fit_transform(y)
X, y = torch.tensor(X), torch.tensor(y)
self.X_bounds = X_bounds = torch.stack([X.min() * torch.ones(X.shape[1]),
X.max() * torch.ones(X.shape[1])])
X = normalize(X, X_bounds)
model_homo = SingleTaskGP(train_X=X, train_Y=y, covar_module=self.kernel, likelihood=self.likelihood, **kwargs)
model_homo.likelihood.noise_covar.register_constraint("raw_noise", GreaterThan(1e-5))
model_homo_marginal_log_lik = gpytorch.mlls.ExactMarginalLogLikelihood(model_homo.likelihood, model_homo)
botorch.fit.fit_gpytorch_model(model_homo_marginal_log_lik)
model_homo_marginal_log_lik.eval()
self.model = model_homo_marginal_log_lik
self.inferred_observation_noise = self.scaler.inverse_transform(self.model.likelihood.noise.detach().numpy()[0].reshape(1,1)).squeeze()
return self
def predict(self, X, return_dists=False, return_epistemic=False, return_epistemic_dists=False):
"""
Obtain predictions for the test points.
In addition to the mean and lower/upper bounds, also returns epistemic uncertainty (return_epistemic=True)
and full predictive distribution (return_dists=True).
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.
return_epistemic: if True, the epistemic upper and lower bounds are returned.
return_epistemic_dists: If True, the epistemic distribution for each instance using scipy distributions
is returned.
Returns:
namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
y_lower_epistemic: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of epistemic component of the predictive distribution of the test points.
Only returned when `return_epistemic` is True.
y_upper_epistemic: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of epistemic component of the predictive distribution of the test points.
Only returned when `return_epistemic` is True.
dists: list of predictive distribution as `scipy.stats` objects with length n_samples.
Only returned when `return_dists` is True.
"""
X = torch.tensor(X)
X_test_norm = normalize(X, self.X_bounds)
self.model.eval()
with torch.no_grad():
posterior = self.model.model.posterior(X_test_norm)
y_mean = posterior.mean
#y_epi_std = torch.sqrt(posterior.variance)
y_lower_epistemic, y_upper_epistemic = posterior.mvn.confidence_region()
predictive_posterior = self.model.model.posterior(X_test_norm, observation_noise=True)
#y_std = torch.sqrt(predictive_posterior.variance)
y_lower_total, y_upper_total = predictive_posterior.mvn.confidence_region()
y_mean = self.scaler.inverse_transform(y_mean.numpy()).squeeze()
y_lower = self.scaler.inverse_transform(y_lower_total.numpy()).squeeze()
y_upper = self.scaler.inverse_transform(y_upper_total.numpy()).squeeze()
y_lower_epistemic = self.scaler.inverse_transform(y_lower_epistemic.numpy()).squeeze()
y_upper_epistemic = self.scaler.inverse_transform(y_upper_epistemic.numpy()).squeeze()
y_epi_std = (y_upper_epistemic - y_lower_epistemic) / 4.0
y_std = (y_upper_total - y_lower_total) / 4.0
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
if return_epistemic:
Result = namedtuple('res', Result._fields + ('y_lower_epistemic', 'y_upper_epistemic',))
res = Result(*res, y_lower_epistemic=y_lower_epistemic, y_upper_epistemic=y_upper_epistemic)
if return_dists:
dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_dists',))
res = Result(*res, y_dists=dists)
if return_epistemic_dists:
epi_dists = [norm(loc=y_mean[i], scale=y_epi_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_epistemic_dists',))
res = Result(*res, y_epistemic_dists=epi_dists)
return res
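# --- Hedged usage sketch (not part of the library) --------------------------
# Fits the GP wrapper on a small 1-D problem. fit() standardizes y with a
# StandardScaler, so y is passed as a 2-D array here; data sizes are
# illustrative assumptions, and this assumes botorch/gpytorch plus an sklearn
# version compatible with the inverse_transform calls in predict().
if __name__ == "__main__":
    _rng = np.random.RandomState(0)
    _X = _rng.uniform(-3, 3, size=(60, 1))
    _y = np.sin(_X) + 0.1 * _rng.randn(60, 1)
    _gp = HomoscedasticGPRegression().fit(_X, _y)
    _res = _gp.predict(_X[:5], return_epistemic=True)
    print(_res.y_mean, _res.y_lower, _res.y_upper)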
<s> from .homoscedastic_gaussian_process_regression import HomoscedasticGPRegression<s> from .ucc_recalibration import UCCRecalibration
<s> from collections import namedtuple
from uq360.algorithms.posthocuq import PostHocUQ
from uq360.utils.misc import form_D_for_auucc
from uq360.metrics.uncertainty_characteristics_curve.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve
class UCCRecalibration(PostHocUQ):
""" Recalibration a regression model to specified operating point using Uncertainty Characteristics Curve.
"""
def __init__(self, base_model):
"""
Args:
base_model: pretrained model to be recalibrated.
"""
super().__init__()
self.base_model = self._process_pretrained_model(base_model)
self.ucc = None
def get_params(self, deep=True):
return {"base_model": self.base_model}
def _process_pretrained_model(self, base_model):
return base_model
def fit(self, X, y):
"""
Fit the Uncertainty Characteristics Curve.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
y_pred_mean, y_pred_lower, y_pred_upper = self.base_model.predict(X)[:3]
bwu = y_pred_upper - y_pred_mean
bwl = y_pred_mean - y_pred_lower
self.ucc = UncertaintyCharacteristicsCurve()
self.ucc.fit(form_D_for_auucc(y_pred_mean, bwl, bwu), y.squeeze())
return self
def predict(self, X, missrate=0.05):
"""
Generate prediction and uncertainty bounds for data X.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
missrate: desired missrate of the new operating point, set to 0.05 by default.
Returns:
namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
C = self.ucc.get_specific_operating_point(req_y_axis_value=missrate, vary_bias=False)
new_scale = C['modvalue']
y_pred_mean, y_pred_lower, y_pred_upper = self.base_model.predict(X)[:3]
bwu = y_pred_upper - y_pred_mean
bwl = y_pred_mean - y_pred_lower
if C['operation'] == 'bias':
    calib_y_pred_upper = y_pred_mean + (new_scale + bwu)  # additive adjustment of the upper bound width
    calib_y_pred_lower = y_pred_mean - (new_scale + bwl)  # additive adjustment of the lower bound width
else:
    calib_y_pred_upper = y_pred_mean + (new_scale * bwu)  # multiplicative scaling of the upper bound width
    calib_y_pred_lower = y_pred_mean - (new_scale * bwl)  # multiplicative scaling of the lower bound width
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_pred_mean, calib_y_pred_lower, calib_y_pred_upper)
return res
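# --- Hedged usage sketch (not part of the library) --------------------------
# UCCRecalibration wraps any pre-fitted model whose predict(X) yields
# (mean, lower, upper). _ToyIntervalModel below is a hypothetical stand-in
# defined only for this sketch; in practice you would pass one of the
# regression UQ models from this package.
if __name__ == "__main__":
    import numpy as np

    class _ToyIntervalModel:
        """Hypothetical base model with a constant +/- 2*std interval."""
        def fit(self, X, y):
            self.mu_, self.sd_ = float(np.mean(y)), float(np.std(y))
            return self
        def predict(self, X):
            mean = np.full(len(X), self.mu_)
            return mean, mean - 2 * self.sd_, mean + 2 * self.sd_

    _rng = np.random.RandomState(0)
    _X = _rng.uniform(-3, 3, size=(300, 1))
    _y = np.sin(_X[:, 0]) + 0.2 * _rng.randn(300)
    _base = _ToyIntervalModel().fit(_X, _y)
    _recal = UCCRecalibration(base_model=_base).fit(_X, _y)
    _res = _recal.predict(_X[:5], missrate=0.05)
    print(_res.y_mean, _res.y_lower, _res.y_upper)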
<s> from collections import namedtuple
import numpy as np
from sklearn.calibration import CalibratedClassifierCV
from sklearn.preprocessing import LabelEncoder
from uq360.utils.misc import DummySklearnEstimator
from uq360.algorithms.posthocuq import PostHocUQ
class ClassificationCalibration(PostHocUQ):
"""Post hoc calibration of classification models. Currently wraps `CalibratedClassifierCV` from sklearn and allows
non-sklearn models to be calibrated.
"""
def __init__(self, num_classes, fit_mode="features", method='isotonic', base_model_prediction_func=None):
"""
Args:
num_classes: number of classes.
fit_mode: features or probs. If probs, `fit` and `predict` operate on the base model's probability scores,
useful when these are precomputed.
method: isotonic or sigmoid.
base_model_prediction_func: the function that takes in the input features and produces base model's
probability scores. This is ignored when operating in `probs` mode.
"""
super().__init__()
if fit_mode == "probs":
# In this case, the fit assumes that it receives the probability scores of the base model.
# create a dummy estimator
self.base_model = DummySklearnEstimator(num_classes, lambda x: x)
else:
self.base_model = DummySklearnEstimator(num_classes, base_model_prediction_func)
self.num_classes = num_classes
self.fit_mode = fit_mode
self.method = method
self.base_model_prediction_func = base_model_prediction_func  # stored so get_params() can report them
def get_params(self, deep=True):
return {"num_classes": self.num_classes, "fit_mode": self.fit_mode, "method": self.method,
"base_model_prediction_func": self.base_model_prediction_func}
def _process_pretrained_model(self, base_model):
return base_model
def fit(self, X, y):
""" Fits calibration model using the provided calibration set.
Args:
X: array-like of shape (n_samples, n_features) or (n_samples, n_classes).
Features vectors of the training data or the probability scores from the base model.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
self.base_model.label_encoder_ = LabelEncoder().fit(y)
self.calib_model = CalibratedClassifierCV(base_estimator=self.base_model,
cv="prefit",
method=self.method)
self.calib_model.fit(X, y)
return self
def predict(self, X):
"""
Obtain calibrated predictions for the test points.
Args:
X: array-like of shape (n_samples, n_features) or (n_samples, n_classes).
Features vectors of the training data or the probability scores from the base model.
Returns:
namedtuple: A namedtuple that holds
y_pred: ndarray of shape (n_samples,)
Predicted labels of the test points.
y_prob: ndarray of shape (n_samples, n_classes)
Predicted probability scores of the classes.
"""
y_prob = self.calib_model.predict_proba(X)
if len(np.shape(y_prob)) == 1:
y_pred_labels = y_prob > 0.5
else:
y_pred_labels = np.argmax(y_prob, axis=1)
Result = namedtuple('res', ['y_pred', 'y_prob'])
res = Result(y_pred_labels, y_prob)
return res
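# --- Hedged usage sketch (not part of the library) --------------------------
# Calibrates precomputed base-model probabilities in fit_mode="probs". The
# synthetic scores below are an illustrative assumption; this also assumes an
# sklearn version in which CalibratedClassifierCV still takes `base_estimator`.
if __name__ == "__main__":
    _rng = np.random.RandomState(0)
    _probs = _rng.dirichlet([2.0, 2.0], size=500)  # stand-in base-model scores
    _y = (_rng.rand(500) < _probs[:, 1]).astype(int)
    _cal = ClassificationCalibration(num_classes=2, fit_mode="probs",
                                     method="isotonic").fit(_probs, _y)
    _res = _cal.predict(_probs[:5])
    print(_res.y_pred, _res.y_prob)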
<s> from .classification_calibration import ClassificationCalibration
<s> import numpy as np
from scipy.stats import norm
from sklearn.metrics import mean_squared_error, r2_score
from ..utils.misc import fitted_ucc_w_nullref
def picp(y_true, y_lower, y_upper):
"""
Prediction Interval Coverage Probability (PICP). Computes the fraction of samples for which the ground truth lies
within the predicted interval. Measures the prediction interval calibration for regression.
Args:
y_true: Ground truth
y_lower: predicted lower bound
y_upper: predicted upper bound
Returns:
float: the fraction of samples for which the ground truth lies within the predicted interval.
"""
satisfies_upper_bound = y_true <= y_upper
satisfies_lower_bound = y_true >= y_lower
return np.mean(satisfies_upper_bound * satisfies_lower_bound)
def mpiw(y_lower, y_upper):
"""
Mean Prediction Interval Width (MPIW). Computes the average width of the prediction intervals. Measures the
sharpness of intervals.
Args:
y_lower: predicted lower bound
y_upper: predicted upper bound
Returns:
float: the average width of the prediction interval across samples.
"""
return np.mean(np.abs(y_lower - y_upper))
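# Hedged sanity check for the two interval metrics above (illustrative only,
# not part of the library): a symmetric ~95% Gaussian band should give PICP
# near 0.95 and MPIW equal to the constant band width of 2 * 1.96.
def _demo_interval_metrics():
    rng = np.random.RandomState(0)
    y_true = rng.randn(10000)
    y_lower = np.full_like(y_true, -1.96)
    y_upper = np.full_like(y_true, 1.96)
    print("PICP:", picp(y_true, y_lower, y_upper))  # ~0.95
    print("MPIW:", mpiw(y_lower, y_upper))          # 3.92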
def auucc_gain(y_true, y_mean, y_lower, y_upper):
""" Computes the Area Under the Uncertainty Characteristics Curve (AUUCC) gain wrt to a null reference
with constant band.
Args:
y_true: Ground truth
y_mean: predicted mean
y_lower: predicted lower bound
y_upper: predicted upper bound
Returns:
float: AUUCC gain
"""
u = fitted_ucc_w_nullref(y_true, y_mean, y_lower, y_upper)
auucc = u.get_AUUCC()
assert(isinstance(auucc, list) and len(auucc) == 2), "Failed to calculate auucc gain"
assert (not np.isclose(auucc[1], 0.)), "Failed to calculate auucc gain"
auucc_gain = (auucc[1]-auucc[0])/auucc[0]
return auucc_gain
def negative_log_likelihood_Gaussian(y_true, y_mean, y_lower, y_upper):
""" Computes Gaussian negative_log_likelihood assuming symmetric band around the mean.
Args:
y_true: Ground truth
y_mean: predicted mean
y_lower: predicted lower bound
y_upper: predicted upper bound
Returns:
float: nll
"""
y_std = (y_upper - y_lower) / 4.0
nll = np.mean(-norm.logpdf(y_true.squeeze(), loc=y_mean.squeeze(), scale=y_std.squeeze()))
return nll
def compute_regression_metrics(y_true, y_mean, y_lower, y_upper, option="all", nll_fn=None):
"""
Computes the metrics specified in `option`, which can be a string or a list of strings. Default option `all` computes
the ["rmse", "nll", "auucc_gain", "picp", "mpiw", "r2"] metrics.
Args:
y_true: Ground truth
y_mean: predicted mean
y_lower: predicted lower bound
y_upper: predicted upper bound
option: string or list of strings containing the names of the metrics to be computed.
nll_fn: function that evaluates NLL; if None, computes Gaussian NLL using y_mean, y_lower and y_upper.
Returns:
dict: dictionary containing the computed metrics.
"""
assert y_true.shape == y_mean.shape, "y_true shape: {}, y_mean shape: {}".format(y_true.shape, y_mean.shape)
assert y_true.shape == y_lower.shape, "y_true shape: {}, y_lower shape: {}".format(y_true.shape, y_lower.shape)
assert y_true.shape == y_upper.shape, "y_true shape: {}, y_upper shape: {}".format(y_true.shape, y_upper.shape)
results = {}
if not isinstance(option, list):
    if option == "all":
        option_list = ["rmse", "nll", "auucc_gain", "picp", "mpiw", "r2"]
    else:
        option_list = [option]
else:
    option_list = option  # option is already a list of metric names
if "rmse" in option_list:
results["rmse"] = mean_squared_error(y_true, y_mean, squared=False)
if "nll" in option_list:
if nll_fn is None:
nll = negative_log_likelihood_Gaussian(y_true, y_mean, y_lower, y_upper)
results["nll"] = nll
else:
results["nll"] = np.mean(nll_fn(y_true))
if "auucc_gain" in option_list:
gain = auucc_gain(y_true, y_mean, y_lower, y_upper)
results["auucc_gain"] = gain
if "picp" in option_list:
results["picp"] = picp(y_true, y_lower, y_upper)
if "mpiw" in option_list:
results["mpiw"] = mpiw(y_lower, y_upper)
if "r2" in option_list:
results["r2"] = r2_score(y_true, y_mean)
return results
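# Hedged usage sketch for compute_regression_metrics (illustrative values, not
# part of the library): requests a subset of metrics so the example does not
# depend on the UCC utilities that auucc_gain needs.
def _demo_regression_metrics():
    rng = np.random.RandomState(0)
    y_true = rng.randn(200)
    y_mean = y_true + 0.1 * rng.randn(200)
    y_lower, y_upper = y_mean - 0.3, y_mean + 0.3
    print(compute_regression_metrics(y_true, y_mean, y_lower, y_upper,
                                     option=["rmse", "nll", "picp", "mpiw", "r2"]))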
def _check_not_tuple_of_2_elements(obj, obj_name='obj'):
"""Check object is not tuple or does not have 2 elements."""
if not isinstance(obj, tuple) or len(obj) != 2:
raise TypeError('%s must be a tuple of 2 elements.' % obj_name)
def plot_uncertainty_distribution(dist, show_quantile_dots=False, qd_sample=20, qd_bins=7,
ax=None, figsize=None, dpi=None,
title='Predicted Distribution', xlims=None, xlabel='Prediction', ylabel='Density', **kwargs):
"""
Plot the uncertainty distribution for a single distribution.
Args:
dist: scipy.stats._continuous_distns.
A scipy distribution object.
show_quantile_dots: boolean.
Whether to show quantile dots on top of the density plot.
qd_sample: int.
Number of dots for the quantile dot plot.
qd_bins: int.
Number of bins for the quantile dot plot.
ax: matplotlib.axes.Axes or None, optional (default=None).
Target axes instance. If None, new figure and axes will be created.
figsize: tuple of 2 elements or None, optional (default=None).
Figure size.
dpi : int or None, optional (default=None).
Resolution of the figure.
title : string or None, optional (default=Predicted Distribution)
Axes title.
If None, title is disabled.
xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.xlim()``.
xlabel : string or None, optional (default=Prediction)
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional (default=Density)
Y-axis title label.
If None, title is disabled.
Returns:
matplotlib.axes.Axes: ax : The plot with prediction distribution.
"""
import matplotlib.pyplot as plt
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
x = np.linspace(dist.ppf(0.01), dist.ppf(0.99), 100)
ax.plot(x, dist.pdf(x), **kwargs)
if show_quantile_dots:
from matplotlib.patches import Circle
from matplotlib.collections import PatchCollection
import matplotlib.ticker as ticker
data = dist.rvs(size=10000)
p_less_than_x = np.linspace(1 / qd_sample / 2, 1 - (1 / qd_sample / 2), qd_sample)
x_ = np.percentile(data, p_less_than_x * 100) # Inverse CDF (ppf)
# Create bins
hist = np.histogram(x_, bins=qd_bins)
bins, edges = hist
radius = (edges[1] - edges[0]) / 2
ax2 = ax.twinx()
patches = []
max_y = 0
for i in range(qd_bins):
x_bin = (edges[i + 1] + edges[i]) / 2
y_bins = [(j + 1) * (radius * 2) for j in range(bins[i])]  # stack dots upward within the bin
max_y = max(y_bins) if max(y_bins) > max_y else max_y
for _, y_bin in enumerate(y_bins):
circle = Circle((x_bin, y_bin), radius)
patches.append(circle)
p = PatchCollection(patches, alpha=0.4)
ax2.add_collection(p)
# Axis tweak
y_scale = (max_y + radius) / max(dist.pdf(x))
ticks_y = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(x / y_scale))
ax2.yaxis.set_major_formatter(ticks_y)
ax2.set_yticklabels([])
if xlims is not None:
ax2.set_xlim(left=xlims[0], right=xlims[1])
else:
ax2.set_xlim([min(x_) - radius, max(x) + radius])
ax2.set_ylim([0, max_y + radius])
ax2.set_aspect(1)
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
return ax
def plot_picp_by_feature(x_test, y_test, y_test_pred_lower_total, y_test_pred_upper_total, num_bins=10,
ax=None, figsize=None, dpi=None, xlims=None, ylims=None, xscale="linear",
title=None, xlabel=None, ylabel=None):
"""
Plot how prediction uncertainty varies across the entire range of a feature.
Args:
x_test: One dimensional ndarray.
Feature column of the test dataset.
y_test: One dimensional ndarray.
Ground truth label of the test dataset.
y_test_pred_lower_total: One dimensional ndarray.
Lower bound of the total uncertainty range.
y_test_pred_upper_total: One dimensional ndarray.
Upper bound of the total uncertainty range.
num_bins: int.
Number of bins used to discretize x_test into equal-sample-sized bins.
ax: matplotlib.axes.Axes or None, optional (default=None). Target axes instance. If None, new figure and axes will be created.
figsize: tuple of 2 elements or None, optional (default=None). Figure size.
dpi : int or None, optional (default=None). Resolution of the figure.
xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.xlim()``.
ylims: tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.ylim()``.
xscale: Passed to ``ax.set_xscale()``.
title : string or None, optional
Axes title.
If None, title is disabled.
xlabel : string or None, optional
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional
Y-axis title label.
If None, title is disabled.
Returns:
matplotlib.axes.Axes: ax : The plot with PICP scores binned by a feature.
"""
from scipy.stats.mstats import mquantiles
import matplotlib.pyplot as plt
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
x_uniques_sorted = np.sort(np.unique(x_test))
num_unique = len(x_uniques_sorted)
sample_bin_ids = np.searchsorted(x_uniques_sorted, x_test)
if len(x_uniques_sorted) > 10: # bin the values
q_bins = mquantiles(x_test, np.histogram_bin_edges([], bins=num_bins-1, range=(0.0, 1.0))[1:])
q_sample_bin_ids = np.digitize(x_test, q_bins)
picps = np.array([picp(y_test[q_sample_bin_ids==bin], y_test_pred_lower_total[q_sample_bin_ids==bin],
y_test_pred_upper_total[q_sample_bin_ids==bin]) for bin in range(num_bins)])
unique_sample_bin_ids = np.digitize(x_uniques_sorted, q_bins)
picp_replicated = [len(x_uniques_sorted[unique_sample_bin_ids == bin]) * [picps[bin]] for bin in range(num_bins)]
picp_replicated = np.array([item for sublist in picp_replicated for item in sublist])
else:
picps = np.array([picp(y_test[sample_bin_ids == bin], y_test_pred_lower_total[sample_bin_ids == bin],
y_test_pred_upper_total[sample_bin_ids == bin]) for bin in range(num_unique)])
picp_replicated = picps
ax.plot(x_uniques_sorted, picp_replicated, label='PICP')
ax.axhline(0.95, linestyle='--', label='95%')
ax.set_ylabel('PICP')
ax.legend(loc='best')
if title is None:
title = 'Test data overall PICP: {:.2f} MPIW: {:.2f}'.format(
picp(y_test,
y_test_pred_lower_total,
y_test_pred_upper_total),
mpiw(y_test_pred_lower_total,
y_test_pred_upper_total))
if xlims is not None:
ax.set_xlim(left=xlims[0], right=xlims[1])
if ylims is not None:
ax.set_ylim(bottom=ylims[0], top=ylims[1])
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if xscale is not None:
ax.set_xscale(xscale)
return ax
def plot_uncertainty_by_feature(x_test, y_test_pred_mean, y_test_pred_lower_total, y_test_pred_upper_total,
y_test_pred_lower_epistemic=None, y_test_pred_upper_epistemic=None,
ax=None, figsize=None, dpi=None, xlims=None, xscale="linear",
title=None, xlabel=None, ylabel=None):
"""
Plot how prediction uncertainty varies across the entire range of a feature.
Args:
x_test: one dimensional ndarray.
Feature column of the test dataset.
y_test_pred_mean: One dimensional ndarray.
Model prediction for the test dataset.
y_test_pred_lower_total: One dimensional ndarray.
Lower bound of the total uncertainty range.
y_test_pred_upper_total: One dimensional ndarray.
Upper bound of the total uncertainty range.
y_test_pred_lower_epistemic: One dimensional ndarray.
Lower bound of the epistemic uncertainty range.
y_test_pred_upper_epistemic: One dimensional ndarray.
Upper bound of the epistemic uncertainty range.
ax: matplotlib.axes.Axes or None, optional (default=None). Target axes instance. If None, new figure and axes will be created.
figsize: tuple of 2 elements or None, optional (default=None). Figure size.
dpi : int or None, optional (default=None). Resolution of the figure.
xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.xlim()``.
xscale: Passed to ``ax.set_xscale()``.
title : string or None, optional
Axes title.
If None, title is disabled.
xlabel : string or None, optional
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional
Y-axis title label.
If None, title is disabled.
Returns:
matplotlib.axes.Axes: ax : The plot with model's uncertainty binned by a feature.
"""
import matplotlib.pyplot as plt
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
x_uniques_sorted = np.sort(np.unique(x_test))
y_pred_var = ((y_test_pred_upper_total - y_test_pred_lower_total) / 4.0)**2
agg_y_std = np.array([np.sqrt(np.mean(y_pred_var[x_test==x])) for x in x_uniques_sorted])
agg_y_mean = np.array([np.mean(y_test_pred_mean[x_test==x]) for x in x_uniques_sorted])
ax.plot(x_uniques_sorted, agg_y_mean, '-b', lw=2, label='mean prediction')
ax.fill_between(x_uniques_sorted,
agg_y_mean - 2.0 * agg_y_std,
agg_y_mean + 2.0 * agg_y_std,
alpha=0.3, label='total uncertainty')
if y_test_pred_lower_epistemic is not None:
y_pred_var_epistemic = ((y_test_pred_upper_epistemic - y_test_pred_lower_epistemic) / 4.0)**2
agg_y_std_epistemic = np.array([np.sqrt(np.mean(y_pred_var_epistemic[x_test==x])) for x in x_uniques_sorted])
ax.fill_between(x_uniques_sorted,
agg_y_mean - 2.0 * agg_y_std_epistemic,
agg_y_mean + 2.0 * agg_y_std_epistemic,
alpha=0.3, label='model uncertainty')
ax.legend(loc='best')
if xlims is not None:
ax.set_xlim(left=xlims[0], right=xlims[1])
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if xscale is not None:
ax.set_xscale(xscale)
return ax
<s> import numpy as np
import pandas as pd
from scipy.stats import entropy
from sklearn.metrics import roc_auc_score, log_loss, accuracy_score
def entropy_based_uncertainty_decomposition(y_prob_samples):
""" Entropy based decomposition [2]_ of predictive uncertainty into aleatoric and epistemic components.
References:
.. [2] Depeweg, S., Hernandez-Lobato, J. M., Doshi-Velez, F., & Udluft, S. (2018, July). Decomposition of
uncertainty in Bayesian deep learning for efficient and risk-sensitive learning. In International Conference
on Machine Learning (pp. 1184-1193). PMLR.
Args:
y_prob_samples: list of array-like of shape (n_samples, n_classes) containing class prediction probabilities
corresponding to samples from the model posterior.
Returns:
tuple:
- total_uncertainty: entropy of the predictive distribution.
- aleatoric_uncertainty: aleatoric component of the total_uncertainty.
- epistemic_uncertainty: epistemic component of the total_uncertainty.
"""
y_preds_samples_stacked = np.stack(y_prob_samples)
preds_mean = np.mean(y_preds_samples_stacked, 0)
total_uncertainty = entropy(preds_mean, axis=1)
aleatoric_uncertainty = np.mean(
np.concatenate([entropy(y_pred, axis=1).reshape(-1, 1) for y_pred in y_prob_samples], axis=1),
axis=1)
epistemic_uncertainty = total_uncertainty - aleatoric_uncertainty
return total_uncertainty, aleatoric_uncertainty, epistemic_uncertainty
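# Hedged usage sketch (illustrative only, not part of the library): 30
# posterior samples of class probabilities for 5 points and 3 classes; the
# decomposition satisfies epistemic = total - aleatoric elementwise.
def _demo_entropy_decomposition():
    rng = np.random.RandomState(0)
    samples = [rng.dirichlet([5.0, 3.0, 2.0], size=5) for _ in range(30)]
    tot, ale, epi = entropy_based_uncertainty_decomposition(samples)
    print(tot, ale, epi)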
def multiclass_brier_score(y_true, y_prob):
"""Brier score for multi-class.
Args:
y_true: array-like of shape (n_samples,)
ground truth labels.
y_prob: array-like of shape (n_samples, n_classes).
Probability scores from the base model.
Returns:
float: Brier score.
"""
assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)"
y_target = np.zeros_like(y_prob)
y_target[np.arange(len(y_true)), y_true] = 1.0  # one-hot encode the true label of each sample
return np.mean(np.sum((y_target - y_prob) ** 2, axis=1))
def area_under_risk_rejection_rate_curve(y_true, y_prob, y_pred=None, selection_scores=None, risk_func=accuracy_score,
attributes=None, num_bins=10, subgroup_ids=None,
return_counts=False):
""" Computes risk vs rejection rate curve and the area under this curve. Similar to risk-coverage curves [3]_ where
coverage instead of rejection rate is used.
References:
.. [3] Franc, Vojtech, and Daniel Prusa. "On discriminative learning of prediction uncertainty."
In International Conference on Machine Learning, pp. 1963-1971. 2019.
Args:
y_true: array-like of shape (n_samples,)
ground truth labels.
y_prob: array-like of shape (n_samples, n_classes).
Probability scores from the base model.
y_pred: array-like of shape (n_samples,)
predicted labels.
selection_scores: scores corresponding to certainty in the predicted labels.
risk_func: risk function under consideration.
attributes: (optional) if risk function is a fairness metric also pass the protected attribute name.
num_bins: number of bins.
subgroup_ids: (optional) selectively compute risk on a subgroup of the samples specified by subgroup_ids.
return_counts: set to True to return counts also.
Returns:
float or tuple:
- aurrrc (float): area under risk rejection rate curve.
- rejection_rates (list): rejection rates for each bin (returned only if return_counts is True).
- selection_thresholds (list): selection threshold for each bin (returned only if return_counts is True).
- risks (list): risk in each bin (returned only if return_counts is True).
"""
if selection_scores is None:
assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)"
selection_scores = y_prob[np.arange(y_prob.shape[0]), np.argmax(y_prob, axis=1)]
if y_pred is None:
assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)"
y_pred = np.argmax(y_prob, axis=1)
order = np.argsort(selection_scores)[::-1]
rejection_rates = []
selection_thresholds = []
risks = []
for bin_id in range(num_bins):
samples_in_bin = len(y_true) // num_bins
selection_threshold = selection_scores[order[samples_in_bin * (bin_id+1)-1]]
selection_thresholds.append(selection_threshold)
ids = selection_scores >= selection_threshold
if sum(ids) > 0:
if attributes is None:
if isinstance(y_true, pd.Series):
y_true_numpy = y_true.values
else:
y_true_numpy = y_true
if subgroup_ids is None:
risk_value = 1.0 - risk_func(y_true_numpy[ids], y_pred[ids])
else:
if sum(subgroup_ids & ids) > 0:
risk_value = 1.0 - risk_func(y_true_numpy[subgroup_ids & ids], y_pred[subgroup_ids & ids])
else:
risk_value = 0.0
else:
risk_value = risk_func(y_true.iloc[ids], y_pred[ids], prot_attr=attributes)
else:
risk_value = 0.0
risks.append(risk_value)
rejection_rates.append(1.0 - 1.0 * sum(ids) / len(y_true))
aurrrc = np.nanmean(risks)
if not return_counts:
return aurrrc
else:
return aurrrc, rejection_rates, selection_thresholds, risks
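# Hedged usage sketch (illustrative only, not part of the library): with
# labels drawn from the scores themselves, risk should shrink as
# low-confidence samples are rejected.
def _demo_aurrrc():
    rng = np.random.RandomState(0)
    y_prob = rng.dirichlet([2.0, 2.0], size=500)
    y_true = (rng.rand(500) < y_prob[:, 1]).astype(int)
    print(area_under_risk_rejection_rate_curve(y_true, y_prob, num_bins=10))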
def expected_calibration_error(y_true, y_prob, y_pred=None, num_bins=10, return_counts=False):
""" Computes the reliability curve and the expected calibration error [1]_ .
References:
.. [1] Chuan Guo, Geoff Pleiss, Yu Sun, Kilian Q. Weinberger; Proceedings of the 34th International Conference
on Machine Learning, PMLR 70:1321-1330, 2017.
Args:
y_true: array-like of shape (n_samples,)
ground truth labels.
y_prob: array-like of shape (n_samples, n_classes).
Probability scores from the base model.
y_pred: array-like of shape (n_samples,)
predicted labels.
num_bins: number of bins.
return_counts: set to True to return counts also.
Returns:
float or tuple:
- ece (float): expected calibration error.
- confidences_in_bins: average confidence in each bin (returned only if return_counts is True).
- accuracies_in_bins: accuracy in each bin (returned only if return_counts is True).
- frac_samples_in_bins: fraction of samples in each bin (returned only if return_counts is True).
- bin_centers: centers of the confidence bins (returned only if return_counts is True).
"""
assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)"
num_samples, num_classes = y_prob.shape
top_scores = np.max(y_prob, axis=1)
if y_pred is None:
y_pred = np.argmax(y_prob, axis=1)
if num_classes == 2:
bins_edges = np.histogram_bin_edges([], bins=num_bins, range=(0.5, 1.0))
else:
bins_edges = np.histogram_bin_edges([], bins=num_bins, range=(0.0, 1.0))
non_boundary_bin_edges = bins_edges[1:-1]
bin_centers = (bins_edges[1:] + bins_edges[:-1])/2
sample_bin_ids = np.digitize(top_scores, non_boundary_bin_edges)
num_samples_in_bins = np.zeros(num_bins)
accuracies_in_bins = np.zeros(num_bins)
confidences_in_bins = np.zeros(num_bins)
for bin in range(num_bins):
num_samples_in_bins[bin] = len(y_pred[sample_bin_ids == bin])
if num_samples_in_bins[bin] > 0:
accuracies_in_bins[bin] = np.sum(y_true[sample_bin_ids == bin] == y_pred[sample_bin_ids == bin]) / num_samples_in_bins[bin]
confidences_in_bins[bin] = np.sum(top_scores[sample_bin_ids == bin]) / num_samples_in_bins[bin]
ece = np.sum(
num_samples_in_bins * np.abs(accuracies_in_bins - confidences_in_bins) / num_samples
)
frac_samples_in_bins = num_samples_in_bins / num_samples
if not return_counts:
return ece
else:
return ece, confidences_in_bins, accuracies_in_bins, frac_samples_in_bins, bin_centers
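# Hedged usage sketch (illustrative only, not part of the library): labels
# sampled from the scores are perfectly calibrated by construction, so the
# expected calibration error should be close to zero.
def _demo_expected_calibration_error():
    rng = np.random.RandomState(0)
    y_prob = rng.dirichlet([1.0, 1.0], size=2000)
    y_true = (rng.rand(2000) < y_prob[:, 1]).astype(int)
    print(expected_calibration_error(y_true, y_prob, num_bins=10))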
def compute_classification_metrics(y_true, y_prob, option='all'):
"""
Computes the metrics specified in `option`, which can be a string or a list of strings. Default option `all` computes
the [aurrrc, ece, auroc, nll, brier, accuracy] metrics.
Args:
y_true: array-like of shape (n_samples,)
ground truth labels.
y_prob: array-like of shape (n_samples, n_classes).
Probability scores from the base model.
option: string or list of strings containing the names of the metrics to be computed.
Returns:
dict: a dictionary containing the computed metrics.
"""
results = {}
if not isinstance(option, list):
    if option == "all":
        option_list = ["aurrrc", "ece", "auroc", "nll", "brier", "accuracy"]
    else:
        option_list = [option]
else:
    option_list = option  # option is already a list of metric names
if "aurrrc" in option_list:
results["aurrrc"] = area_under_risk_rejection_rate_curve(y_true=y_true, y_prob=y_prob)
if "ece" in option_list:
results["ece"] = expected_calibration_error(y_true=y_true, y_prob=y_prob)
if "auroc" in option_list:
results["auroc"], _ = roc_auc_score(y_true=y_true, y_score=y_prob)
if "nll" in option_list:
results["nll"] = log_loss(y_true=y_true, y_pred=np.argmax(y_prob, axis=1))
if "brier" in option_list:
results["brier"] = multiclass_brier_score(y_true=y_true, y_prob=y_prob)
if "accuracy" in option_list:
results["accuracy"] = accuracy_score(y_true=y_true, y_pred=np.argmax(y_prob, axis=1))
return results
def plot_reliability_diagram(y_true, y_prob, y_pred, plot_label=[""], num_bins=10):
"""
Plots the reliability diagram showing the calibration error for different confidence scores. Multiple curves
can be plotted by passing data as lists.
Args:
        y_true: array-like or a list of array-like of shape (n_samples,)
            ground truth labels.
        y_prob: array-like or a list of array-like of shape (n_samples, n_classes).
            Probability scores from the base model.
        y_pred: array-like or a list of array-like of shape (n_samples,)
predicted labels.
plot_label: (optional) list of names identifying each curve.
num_bins: number of bins.
Returns:
tuple:
            - ece_list: list containing the expected calibration error for each curve.
- accuracies_in_bins_list: list containing binned average accuracies for each curve.
- frac_samples_in_bins_list: list containing binned sample frequencies for each curve.
- confidences_in_bins_list: list containing binned average confidence for each curve.
"""
import matplotlib.pyplot as plt
if not isinstance(y_true, list):
y_true, y_prob, y_pred = [y_true], [y_prob], [y_pred]
if len(plot_label) != len(y_true):
raise ValueError('y_true and plot_label should be of same length.')
ece_list = []
accuracies_in_bins_list = []
frac_samples_in_bins_list = []
confidences_in_bins_list = []
for idx in range(len(plot_label)):
ece, confidences_in_bins, accuracies_in_bins, frac_samples_in_bins, bins = expected_calibration_error(y_true[idx],
y_prob[idx],
y_pred[idx],
num_bins=num_bins,
return_counts=True)
ece_list.append(ece)
accuracies_in_bins_list.append(accuracies_in_bins)
frac_samples_in_bins_list.append(frac_samples_in_bins)
confidences_in_bins_list.append(confidences_in_bins)
fig = plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
for idx in range(len(plot_label)):
plt.plot(bins, frac_samples_in_bins_list[idx], 'o-', label=plot_label[idx])
plt.title("Confidence Histogram")
plt.xlabel("Confidence")
plt.ylabel("Fraction of Samples")
plt.grid()
plt.ylim([0.0, 1.0])
plt.legend()
plt.subplot(1, 2, 2)
for idx in range(len(plot_label)):
plt.plot(bins, accuracies_in_bins_list[idx], 'o-',
label="{} ECE = {:.2f}".format(plot_label[idx], ece_list[idx]))
plt.plot(np.linspace(0, 1, 50), np.linspace(0, 1, 50), 'b.', label="Perfect Calibration")
plt.title("Reliability Plot")
plt.xlabel("Confidence")
plt.ylabel("Accuracy")
plt.grid()
plt.legend()
plt.show()
return ece_list, accuracies_in_bins_list, frac_samples_in_bins_list, confidences_in_bins_list
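# Added illustration: overlaying two models on one reliability diagram by
# passing lists. Synthetic probabilities; assumes numpy as np per the module
# imports above. Not part of the original library code.
def _example_plot_reliability_diagram():
    rng = np.random.RandomState(0)
    y_true = rng.randint(0, 2, size=200)
    probs_a = rng.dirichlet([2, 2], size=200)   # diffuse scores
    probs_b = rng.dirichlet([5, 5], size=200)   # more concentrated scores
    plot_reliability_diagram([y_true, y_true],
                             [probs_a, probs_b],
                             [probs_a.argmax(axis=1), probs_b.argmax(axis=1)],
                             plot_label=["model A", "model B"],
                             num_bins=10)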
def plot_risk_vs_rejection_rate(y_true, y_prob, y_pred, selection_scores=None, plot_label=[""], risk_func=None,
attributes=None, num_bins=10, subgroup_ids=None):
"""
    Plots the risk vs rejection rate curve showing the risk for different rejection rates. Multiple curves
    can be plotted by passing data as lists.
    Args:
        y_true: array-like or a list of array-like of shape (n_samples,)
            ground truth labels.
        y_prob: array-like or a list of array-like of shape (n_samples, n_classes).
            Probability scores from the base model.
        y_pred: array-like or a list of array-like of shape (n_samples,)
predicted labels.
selection_scores: ndarray or a list of ndarray containing scores corresponding to certainty in the predicted labels.
risk_func: risk function under consideration.
attributes: (optional) if risk function is a fairness metric also pass the protected attribute name.
num_bins: number of bins.
subgroup_ids: (optional) ndarray or a list of ndarray containing subgroup_ids to selectively compute risk on a
subgroup of the samples specified by subgroup_ids.
Returns:
tuple:
- aurrrc_list: list containing the area under risk rejection rate curves.
- rejection_rate_list: list containing the binned rejection rates.
- selection_thresholds_list: list containing the binned selection thresholds.
- risk_list: list containing the binned risks.
"""
import matplotlib.pyplot as plt
if not isinstance(y_true, list):
y_true, y_prob, y_pred, selection_scores, subgroup_ids = [y_true], [y_prob], [y_pred], [selection_scores], [subgroup_ids]
if len(plot_label) != len(y_true):
raise ValueError('y_true and plot_label should be of same length.')
aurrrc_list = []
rejection_rate_list = []
risk_list = []
selection_thresholds_list = []
for idx in range(len(plot_label)):
        aurrrc, rejection_rates, selection_thresholds, risks = area_under_risk_rejection_rate_curve(
y_true[idx],
y_prob[idx],
y_pred[idx],
selection_scores=selection_scores[idx],
risk_func=risk_func,
attributes=attributes,
num_bins=num_bins,
subgroup_ids=subgroup_ids[idx],
return_counts=True
)
        aurrrc_list.append(aurrrc)
rejection_rate_list.append(rejection_rates)
risk_list.append(risks)
selection_thresholds_list.append(selection_thresholds)
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
for idx in range(len(plot_label)):
plt.plot(rejection_rate_list[idx], risk_list[idx], label="{} AURRRC={:.5f}".format(plot_label[idx], aurrrc_list[idx]))
plt.legend(loc="best")
plt.xlabel("Rejection Rate")
if risk_func is None:
ylabel = "Prediction Error Rate"
else:
if 'accuracy' in risk_func.__name__:
ylabel = "1.0 - " + risk_func.__name__
else:
ylabel = risk_func.__name__
plt.ylabel(ylabel)
plt.title("Risk vs Rejection Rate Plot")
plt.grid()
plt.subplot(1, 2, 2)
for idx in range(len(plot_label)):
plt.plot(selection_thresholds_list[idx], risk_list[idx], label="{}".format(plot_label[idx]))
plt.legend(loc="best")
plt.xlabel("Selection Threshold")
if risk_func is None:
ylabel = "Prediction Error Rate"
else:
if 'accuracy' in risk_func.__name__:
ylabel = "1.0 - " + risk_func.__name__
else:
ylabel = risk_func.__name__
    plt.ylabel(ylabel)
plt.title("Risk vs Selection Threshold Plot")
plt.grid()
plt.show()
return aurrrc_list, rejection_rate_list, selection_thresholds_list, risk_list
<s> from .classification_metrics import expected_calibration_error, area_under_risk_rejection_rate_curve, \\
compute_classification_metrics, entropy_based_uncertainty_decomposition
from .regression_metrics import picp, mpiw, compute_regression_metrics, plot_uncertainty_distribution, \\
plot_uncertainty_by_feature, plot_picp_by_feature
from .uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve
<s> from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import simps, trapz
from sklearn.isotonic import IsotonicRegression
DEFAULT_X_AXIS_NAME = 'excess'
DEFAULT_Y_AXIS_NAME = 'missrate'
class UncertaintyCharacteristicsCurve:
"""
Class with main functions of the Uncertainty Characteristics Curve (UCC).
"""
def __init__(self, normalize=True, precompute_bias_data=True):
"""
:param normalize: set initial axes normalization flag (can be changed via set_coordinates())
:param precompute_bias_data: if True, fit() will compute statistics necessary to generate bias-based
UCCs (in addition to the scale-based ones). Skipping this precomputation may speed up the fit() call
if bias-based UCC is not needed.
"""
self.axes_name2idx = {"missrate": 1, "bandwidth": 2, "excess": 3, "deficit": 4}
self.axes_idx2descr = {1: "Missrate", 2: "Bandwidth", 3: "Excess", 4: "Deficit"}
self.x_axis_idx = None
self.y_axis_idx = None
self.norm_x_axis = False
self.norm_y_axis = False
self.std_unit = None
self.normalize = normalize
self.d = None
self.gt = None
self.lb = None
self.ub = None
self.precompute_bias_data = precompute_bias_data
self.set_coordinates(x_axis_name=DEFAULT_X_AXIS_NAME, y_axis_name=DEFAULT_Y_AXIS_NAME, normalize=normalize)
def set_coordinates(self, x_axis_name=None, y_axis_name=None, normalize=None):
"""
Assigns user-specified type to the axes and normalization behavior (sticky).
:param x_axis_name: None-> unchanged, or name from self.axes_name2idx
:param y_axis_name: ditto
        :param normalize: True/False will activate/deactivate norming for the specified axes. Behavior for
            axis names that are None will not be changed.
            Value None will leave norm status unchanged.
Note, axis=='missrate' will never get normalized, even with normalize == True
:return: none
"""
normalize = self.normalize if normalize is None else normalize
if x_axis_name is None and self.x_axis_idx is None:
raise ValueError("ERROR(UCC): x-axis has not been defined.")
if y_axis_name is None and self.y_axis_idx is None:
raise ValueError("ERROR(UCC): y-axis has not been defined.")
        if x_axis_name is None and y_axis_name is None and normalize is not None:
            # just set normalization on/off for both (already defined) axes and return;
            # the name arguments are None here, so compare against the stored axis indices
            self.norm_x_axis = False if self.x_axis_idx == self.axes_name2idx['missrate'] else normalize
            self.norm_y_axis = False if self.y_axis_idx == self.axes_name2idx['missrate'] else normalize
            return
if x_axis_name is not None:
self.x_axis_idx = self.axes_name2idx[x_axis_name]
self.norm_x_axis = False if x_axis_name == 'missrate' else normalize
if y_axis_name is not None:
self.y_axis_idx = self.axes_name2idx[y_axis_name]
self.norm_y_axis = False if y_axis_name == 'missrate' else normalize
def set_std_unit(self, std_unit=None):
"""
Sets the UCC's unit to be used when displaying normalized axes.
:param std_unit: if None, the unit will be calculated as stddev of the ground truth data
(ValueError raised if data has not been set at this point)
or set to the user-specified value.
:return:
"""
if std_unit is None: # set it to stddev of data
if self.gt is None:
raise ValueError("ERROR(UCC): No data specified - cannot set stddev unit.")
self.std_unit = np.std(self.gt)
if np.isclose(self.std_unit, 0.):
print("WARN(UCC): data-based stddev is zero - resetting axes unit to 1.")
self.std_unit = 1.
else:
self.std_unit = float(std_unit)
def fit(self, X, gt):
"""
Calculates internal arrays necessary for other methods (plotting, auc, cost minimization).
Re-entrant.
:param X: [numsamples, 3] numpy matrix, or list of numpy matrices.
Col 1: predicted values
Col 2: lower band (deviate) wrt predicted value (always positive)
Col 3: upper band wrt predicted value (always positive)
If list is provided, all methods will output corresponding metrics as lists as well!
        :param gt: Ground truth array (i.e., the 'actual' values corresponding to predictions in X)
:return: self
"""
if not isinstance(X, list):
X = [X]
newX = []
for x in X:
assert (isinstance(x, np.ndarray) and len(x.shape) == 2 and x.shape[1] == 3 and x.shape[0] == len(gt))
newX.append(self._sanitize_input(x))
self.d = [gt - x[:, 0] for x in newX]
self.lb = [x[:, 1] for x in newX]
self.ub = [x[:, 2] for x in newX]
self.gt = gt
self.set_std_unit()
self.plotdata_for_scale = []
self.plotdata_for_bias = []
# precompute plotdata:
for i in range(len(self.d)):
self.plotdata_for_scale.append(self._calc_plotdata(self.d[i], self.lb[i], self.ub[i], vary_bias=False))
if self.precompute_bias_data:
self.plotdata_for_bias.append(self._calc_plotdata(self.d[i], self.lb[i], self.ub[i], vary_bias=True))
return self
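    # Added illustration: minimal end-to-end use of this class on synthetic
    # regression data with symmetric error bands (names below are local):
    #
    #     rng = np.random.RandomState(0)
    #     gt = rng.randn(100)
    #     pred = gt + 0.1 * rng.randn(100)
    #     band = np.full(100, 0.2)
    #     X = np.stack([pred, band, band], axis=1)   # [predicted, lower, upper]
    #     ucc = UncertaintyCharacteristicsCurve().fit(X, gt)
    #     auc = ucc.get_AUUCC()                      # scale-based AUUCC (float)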
def minimize_cost(self, x_axis_cost=.5, y_axis_cost=.5, augment_cost_by_normfactor=True,
search=('scale', 'bias')):
"""
Find minima of a linear cost function for each component.
Cost function C = x_axis_cost * x_axis_value + y_axis_cost * y_axis_value.
A minimum can occur in the scale-based or bias-based UCC (this can be constrained by the 'search' arg).
The function returns a 'recipe' how to achieve the corresponding minimum, for each component.
:param x_axis_cost: weight of one unit on x_axis
:param y_axis_cost: weight of one unit on y_axis
:param augment_cost_by_normfactor: when False, the cost multipliers will apply as is. If True, they will be
pre-normed by the corresponding axis norm (where applicable), to account for range differences between axes.
:param search: list of types over which minimization is to be performed, valid elements are 'scale' and 'bias'.
:return: list of dicts - one per component, or a single dict, if there is only one component. Dict keys are -
'operation': can be 'bias' (additive) or 'scale' (multiplicative), 'modvalue': value to multiply by or to
add to error bars to achieve the minimum, 'new_x'/'new_y': new coordinates (operating point) with that
minimum, 'cost': new cost at minimum point, 'original_cost': original cost (original operating point).
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
if augment_cost_by_normfactor:
if self.norm_x_axis:
x_axis_cost /= self.std_unit
if self.norm_y_axis:
y_axis_cost /= self.std_unit
print("INFO(UCC): Pre-norming costs by corresp. std deviation: new x_axis_cost = %.4f, y_axis_cost = %.4f" %
(x_axis_cost, y_axis_cost))
if isinstance(search, tuple):
search = list(search)
if not isinstance(search, list):
search = [search]
min_costs = []
for d in range(len(self.d)):
# original OP cost
m, b, e, df = self._calc_missrate_bandwidth_excess_deficit(self.d[d], self.lb[d], self.ub[d])
original_cost = x_axis_cost * [0., m, b, e, df][self.x_axis_idx] + y_axis_cost * [0., m, b, e, df][
self.y_axis_idx]
plotdata = self.plotdata_for_scale[d]
cost_scale, minidx_scale = self._find_min_cost_in_component(plotdata, self.x_axis_idx, self.y_axis_idx,
x_axis_cost, y_axis_cost)
mcf_scale_multiplier = plotdata[minidx_scale][0]
mcf_scale_x = plotdata[minidx_scale][self.x_axis_idx]
mcf_scale_y = plotdata[minidx_scale][self.y_axis_idx]
if 'bias' in search:
if not self.precompute_bias_data:
raise ValueError(
"ERROR(UCC): Cannot perform minimization - instantiated without bias data computation")
plotdata = self.plotdata_for_bias[d]
cost_bias, minidx_bias = self._find_min_cost_in_component(plotdata, self.x_axis_idx, self.y_axis_idx,
x_axis_cost, y_axis_cost)
mcf_bias_add = plotdata[minidx_bias][0]
mcf_bias_x = plotdata[minidx_bias][self.x_axis_idx]
mcf_bias_y = plotdata[minidx_bias][self.y_axis_idx]
if 'bias' in search and 'scale' in search:
if cost_bias < cost_scale:
min_costs.append({'operation': 'bias', 'cost': cost_bias, 'modvalue': mcf_bias_add,
'new_x': mcf_bias_x, 'new_y': mcf_bias_y, 'original_cost': original_cost})
else:
min_costs.append({'operation': 'scale', 'cost': cost_scale, 'modvalue': mcf_scale_multiplier,
'new_x': mcf_scale_x, 'new_y': mcf_scale_y, 'original_cost': original_cost})
elif 'scale' in search:
min_costs.append({'operation': 'scale', 'cost': cost_scale, 'modvalue': mcf_scale_multiplier,
'new_x': mcf_scale_x, 'new_y': mcf_scale_y, 'original_cost': original_cost})
elif 'bias' in search:
min_costs.append({'operation': 'bias', 'cost': cost_bias, 'modvalue': mcf_bias_add,
'new_x': mcf_bias_x, 'new_y': mcf_bias_y, 'original_cost': original_cost})
else:
raise ValueError("(ERROR): Unknown search element (%s) requested." % ",".join(search))
if len(min_costs) < 2:
return min_costs[0]
else:
return min_costs
def get_specific_operating_point(self, req_x_axis_value=None, req_y_axis_value=None,
req_critical_value=None, vary_bias=False):
"""
Finds corresponding operating point on the current UCC, given a point on either x or y axis. Returns
a list of recipes how to achieve the point (x,y), for each component. If there is only one component,
returns a single recipe dict.
:param req_x_axis_value: requested x value on UCC (normalization status is taken from current display)
:param req_y_axis_value: requested y value on UCC (normalization status is taken from current display)
:param vary_bias: set to True when referring to bias-induced UCC (scale UCC default)
:return: list of dicts (recipes), or a single dict
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
if np.sum([req_x_axis_value is not None, req_y_axis_value is not None, req_critical_value is not None]) != 1:
raise ValueError("ERROR(UCC): exactly one axis value must be requested at a time.")
if vary_bias and not self.precompute_bias_data:
raise ValueError("ERROR(UCC): Cannot vary bias - instantiated without bias data computation")
xnorm = self.std_unit if self.norm_x_axis else 1.
ynorm = self.std_unit if self.norm_y_axis else 1.
recipe = []
for dc in range(len(self.d)):
plotdata = self.plotdata_for_bias[dc] if vary_bias else self.plotdata_for_scale[dc]
if req_x_axis_value is not None:
tgtidx = self.x_axis_idx
req_value = req_x_axis_value * xnorm
elif req_y_axis_value is not None:
tgtidx = self.y_axis_idx
req_value = req_y_axis_value * ynorm
elif req_critical_value is not None:
req_value = req_critical_value
                tgtidx = 0  # first element in plotdata is always the critical value (scale or bias)
else:
raise RuntimeError("Unhandled case")
closestidx = np.argmin(np.asarray([np.abs(p[tgtidx] - req_value) for p in plotdata]))
recipe.append({'operation': ('bias' if vary_bias else 'scale'),
'modvalue': plotdata[closestidx][0],
'new_x': plotdata[closestidx][self.x_axis_idx] / xnorm,
                           'new_y': plotdata[closestidx][self.y_axis_idx] / ynorm})
if len(recipe) < 2:
return recipe[0]
else:
return recipe
def _find_min_cost_in_component(self, plotdata, idx1, idx2, cost1, cost2):
"""
Find s minimum cost function value and corresp. position index in plotdata
:param plotdata: liste of tuples
:param idx1: idx of x-axis item within the tuple
:param idx2: idx of y-axis item within the tuple
:param cost1: cost factor for x-axis unit
:param cost2: cost factor for y-axis unit
:return: min cost value, index within plotdata where minimum occurs
"""
raw = [cost1 * i[idx1] + cost2 * i[idx2] for i in plotdata]
minidx = np.argmin(raw)
return raw[minidx], minidx
def _sanitize_input(self, x):
"""
        Replaces problematic values in input data (e.g., zero error bars)
:param x: single matrix of input data [n, 3]
:return: sanitized version of x
"""
if np.isclose(np.sum(x[:, 1]), 0.):
raise ValueError("ERROR(UCC): Provided lower bands are all zero.")
if np.isclose(np.sum(x[:, 2]), 0.):
raise ValueError("ERROR(UCC): Provided upper bands are all zero.")
for i in [1, 2]:
if any(np.isclose(x[:, i], 0.)):
print("WARN(UCC): some band values are 0. - REPLACING with positive minimum")
m = np.min(x[x[:, i] > 0, i])
x = np.where(np.isclose(x, 0.), m, x)
return x
def _calc_avg_excess(self, d, lb, ub):
"""
        Excess is the amount by which an error bar overshoots the actual value
:param d: pred-actual array
:param lb: lower band
:param ub: upper band
:return: average excess over array
"""
excess = np.zeros(d.shape)
posidx = np.where(d >= 0)[0]
excess[posidx] = np.where(ub[posidx] - d[posidx] < 0., 0., ub[posidx] - d[posidx])
negidx = np.where(d < 0)[0]
excess[negidx] = np.where(lb[negidx] + d[negidx] < 0., 0., lb[negidx] + d[negidx])
return np.mean(excess)
def _calc_avg_deficit(self, d, lb, ub):
"""
        Deficit is the error bar insufficiency: the bar falls short of the actual value
:param d: pred-actual array
:param lb: lower band
:param ub: upper band
:return: average deficit over array
"""
deficit = np.zeros(d.shape)
posidx = np.where(d >= 0)[0]
deficit[posidx] = np.where(- ub[posidx] + d[posidx] < 0., 0., - ub[posidx] + d[posidx])
negidx = np.where(d < 0)[0]
deficit[negidx] = np.where(- lb[negidx] - d[negidx] < 0., 0., - lb[negidx] - d[negidx])
return np.mean(deficit)
def _calc_missrate_bandwidth_excess_deficit(self, d, lb, ub, scale=1.0, bias=0.0):
"""
        Calculates miss rate (1 - recall) at a given scale/bias, along with average bandwidth, excess and deficit
:param d: delta
:param lb: lower band
:param ub: upper band
:param scale: scale * (x + bias)
:param bias:
:return: miss rate, average bandwidth, avg excess, avg deficit
"""
abslband = scale * np.where((lb + bias) < 0., 0., lb + bias)
absuband = scale * np.where((ub + bias) < 0., 0., ub + bias)
recall = np.sum((d >= - abslband) & (d <= absuband)) / len(d)
avgbandwidth = np.mean([absuband, abslband])
avgexcess = self._calc_avg_excess(d, abslband, absuband)
avgdeficit = self._calc_avg_deficit(d, abslband, absuband)
return 1 - recall, avgbandwidth, avgexcess, avgdeficit
def _calc_plotdata(self, d, lb, ub, vary_bias=False):
"""
Generates data necessary for various UCC metrics.
:param d: delta (predicted - actual) vector
:param ub: upper uncertainty bandwidth (above predicted)
:param lb: lower uncertainty bandwidth (below predicted) - all positive (bandwidth)
:param vary_bias: True will switch to additive bias instead of scale
:return: list. Elements are tuples (varyvalue, missrate, bandwidth, excess, deficit)
"""
# step 1: collect critical scale or bias values
critval = []
for i in range(len(d)):
if not vary_bias:
if d[i] >= 0:
critval.append(d[i] / ub[i])
else:
critval.append(-d[i] / lb[i])
else:
if d[i] >= 0:
critval.append(d[i] - ub[i])
else:
critval.append(-lb[i] - d[i])
critval = sorted(critval)
plotdata = []
for i in range(len(critval)):
if not vary_bias:
missrate, bandwidth, excess, deficit = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub,
scale=critval[i])
else:
missrate, bandwidth, excess, deficit = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub,
bias=critval[i])
plotdata.append((critval[i], missrate, bandwidth, excess, deficit))
return plotdata
def get_AUUCC(self, vary_bias=False, aucfct="trapz", partial_x=None, partial_y=None):
"""
returns approximate area under the curve on current coordinates, for each component.
:param vary_bias: False == varies scale, True == varies bias
:param aucfct: specifies AUC integrator (can be "trapz", "simps")
        :param partial_x: tuple (x_min, x_max) defining the interval on x over which to compute a partial AUC.
            The interval bounds refer to axes as visualized (i.e. potentially normed)
        :param partial_y: tuple (y_min, y_max) defining the interval on y over which to compute a partial AUC. partial_x must be None.
:return: list of floats with AUUCCs for each input component, or a single float, if there is only 1 component.
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
if vary_bias and not self.precompute_bias_data:
raise ValueError("ERROR(UCC): Cannot vary bias - instantiated without bias data computation")
if partial_x is not None and partial_y is not None:
raise ValueError("ERROR(UCC): partial_x and partial_y can not be specified at the same time.")
assert(partial_x is None or (isinstance(partial_x, tuple) and len(partial_x)==2))
assert(partial_y is None or (isinstance(partial_y, tuple) and len(partial_y)==2))
# find starting point (where the x axis value starts to actually change)
rv = []
# do this for individual streams
xind = self.x_axis_idx
aucfct = simps if aucfct == "simps" else trapz
for s in range(len(self.d)):
plotdata = self.plotdata_for_bias[s] if vary_bias else self.plotdata_for_scale[s]
prev = plotdata[0][xind]
t = 1
cval = plotdata[t][xind]
while cval == prev and t < len(plotdata) - 1:
t += 1
prev = cval
cval = plotdata[t][xind]
startt = t - 1 # from here, it's a valid function
endtt = len(plotdata)
if startt >= endtt - 2:
rvs = 0. # no area
else:
xnorm = self.std_unit if self.norm_x_axis else 1.
ynorm = self.std_unit if self.norm_y_axis else 1.
y=[(plotdata[i][self.y_axis_idx]) / ynorm for i in range(startt, endtt)]
x=[(plotdata[i][self.x_axis_idx]) / xnorm for i in range(startt, endtt)]
if partial_x is not None:
from_i = self._find_closest_index(partial_x[0], x)
to_i = self._find_closest_index(partial_x[1], x) + 1
elif partial_y is not None:
from_i = self._find_closest_index(partial_y[0], y)
to_i = self._find_closest_index(partial_y[1], y)
if from_i > to_i: # y is in reverse order
from_i, to_i = to_i, from_i
to_i += 1 # as upper bound in array indexing
else:
from_i = 0
to_i = len(x)
to_i = min(to_i, len(x))
if to_i < from_i:
raise ValueError("ERROR(UCC): Failed to find an appropriate partial-AUC interval in the data.")
if to_i - from_i < 2:
raise RuntimeError("ERROR(UCC): There are too few samples (1) in the partial-AUC interval specified")
rvs = aucfct(x=x[from_i:to_i], y=y[from_i:to_i])
rv.append(rvs)
if len(rv) < 2:
return rv[0]
else:
return rv
    @staticmethod
def _find_closest_index(value, array):
"""
Returns an index of the 'array' element closest in value to 'value'
:param value:
:param array:
:return:
"""
return np.argmin(np.abs(np.asarray(array)-value))
def _get_single_OP(self, d, lb, ub, scale=1., bias=0.):
"""
Returns Operating Point for original input data, on coordinates currently set up, given a scale/bias.
:param scale:
:param bias:
:return: single tuple (x point, y point, unit of x, unit of y)
"""
xnorm = self.std_unit if self.norm_x_axis else 1.
ynorm = self.std_unit if self.norm_y_axis else 1.
auxop = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub, scale=scale, bias=bias)
op = [0.] + [i for i in auxop] # mimic plotdata (first element ignored here)
return (op[self.x_axis_idx] / xnorm, op[self.y_axis_idx] / ynorm, xnorm, ynorm)
def get_OP(self, scale=1., bias=0.):
"""
Returns all Operating Points for original input data, on coordinates currently set up, given a scale/bias.
:param scale:
:param bias:
:return: list of tuples (x point, y point, unit of x, unit of y) or a single tuple if there is only
1 component.
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
op = []
for dc in range(len(self.d)):
op.append(self._get_single_OP(self.d[dc], self.lb[dc], self.ub[dc], scale=scale, bias=bias))
if len(op) < 2:
return op[0]
else:
return op
def plot_UCC(self, titlestr='', syslabel='model', outfn=None, vary_bias=False, markers=None,
xlim=None, ylim=None, **kwargs):
""" Will plot/display the UCC based on current data and coordinates. Multiple curves will be shown
if there are multiple data components (via fit())
:param titlestr: Plot title string
        :param syslabel: list of label strings to appear in the plot legend. Can be a single string, if one component.
:param outfn: base name of an image file to be created (will append .png before creating)
:param vary_bias: True will switch to varying additive bias (default is multiplicative scale)
:param markers: None or a list of marker styles to be used for each curve.
List must be same or longer than number of components.
Markers can be one among these ['o', 's', 'v', 'D', '+'].
        :param xlim: tuple or list specifying the range for the x axis, or None (auto)
        :param ylim: tuple or list specifying the range for the y axis, or None (auto)
:param `**kwargs`: Additional arguments passed to the main plot call.
:return: list of areas under the curve (or single area, if one data component)
list of operating points (or single op): format of an op is tuple (xaxis value, yaxis value, xunit, yunit)
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
if vary_bias and not self.precompute_bias_data:
raise ValueError("ERROR(UCC): Cannot vary bias - instantiated without bias data computation")
if not isinstance(syslabel, list):
syslabel = [syslabel]
assert (len(syslabel) == len(self.d))
assert (markers is None or (isinstance(markers, list) and len(markers) >= len(self.d)))
# main plot of (possibly multiple) datasets
plt.figure()
xnorm = self.std_unit if self.norm_x_axis else 1.
ynorm = self.std_unit if self.norm_y_axis else 1.
op_info = []
auucc = self.get_AUUCC(vary_bias=vary_bias)
auucc = [auucc] if not isinstance(auucc, list) else auucc
for s in range(len(self.d)):
# original operating point
x_op, y_op, x_unit, y_unit = self._get_single_OP(self.d[s], self.lb[s], self.ub[s])
op_info.append((x_op, y_op, x_unit, y_unit))
# display chart
plotdata = self.plotdata_for_scale[s] if not vary_bias else self.plotdata_for_bias[s]
            axisX_data = [i[self.x_axis_idx] / xnorm for i in plotdata]
axisY_data = [i[self.y_axis_idx] / ynorm for i in plotdata]
marker = None
if markers is not None: marker = markers[s]
            p = plt.plot(axisX_data, axisY_data, label=syslabel[s], marker=marker, **kwargs)
<s>
class HorseshoeLayer(BayesianLinearLayer):
"""
Uses non-centered parametrization. w_k = v*tau_k*beta_k where k indexes an output unit and w_k and beta_k
are vectors of all weights incident into the unit
"""
def __init__(self, in_features, out_features, cuda=False, scale=1.):
super(HorseshoeLayer, self).__init__(in_features, out_features)
self.cuda = cuda
self.in_features = in_features
self.out_features = out_features
self.nodescales = InvGammaHalfCauchyLayer(out_features=out_features, b=1.)
self.layerscale = InvGammaHalfCauchyLayer(out_features=1, b=scale)
# prior on beta is N(0, I) when employing non centered parameterization
self.prior_stdv = torch.Tensor([1])
self.prior_mean = torch.Tensor([0.])
def forward(self, x, do_sample=True, debug=False, eps_scale=None, eps_w=None):
# At a particular unit k, preactivation_sample = scale_sample * pre_activation_sample
# sample scales
scale_mean = 0.5 * (self.nodescales.mu + self.layerscale.mu)
scale_var = 0.25 * (self.nodescales.log_sigma.exp() ** 2 + self.layerscale.log_sigma.exp() ** 2)
scale_sample = reparam(scale_mean, scale_var.log(), do_sample=do_sample).exp()
# sample preactivations
mu_activations = F.linear(x, self.weights, self.bias)
var_activations = F.linear(x.pow(2), self.weights_logvar.exp(), self.bias_logvar.exp())
activ_sample = reparam(mu_activations, var_activations.log(), do_sample=do_sample)
return scale_sample * activ_sample
def kl(self):
return super(HorseshoeLayer, self).kl() + self.nodescales.kl() + self.layerscale.kl()
def fixed_point_updates(self):
self.nodescales.fixed_point_updates()
self.layerscale.fixed_point_updates()
class RegularizedHorseshoeLayer(HorseshoeLayer):
"""
Uses the regularized Horseshoe distribution. The regularized Horseshoe soft thresholds the tails of the Horseshoe.
For all weights w_k incident upon node k in the layer we have:
    w_k ~ N(0, (tau_k * v)^2 I) N(0, c^2 I), c^2 ~ InverseGamma(c_a, c_b).
c^2 controls the scale of the thresholding. As c^2 -> infinity, the regularized Horseshoe -> Horseshoe.
"""
def __init__(self, in_features, out_features, cuda=False, scale=1., c_a=2., c_b=6.):
super(RegularizedHorseshoeLayer, self).__init__(in_features, out_features, cuda=cuda, scale=scale)
self.c = InvGammaLayer(a=c_a, b=c_b)
def forward(self, x, do_sample=True, **kwargs):
# At a particular unit k, preactivation_sample = scale_sample * pre_activation_sample
# sample regularized scales
scale_mean = self.nodescales.mu + self.layerscale.mu
scale_var = self.nodescales.log_sigma.exp() ** 2 + self.layerscale.log_sigma.exp() ** 2
scale_sample = reparam(scale_mean, scale_var.log(), do_sample=do_sample).exp()
c_sample = reparam(self.c.mu, 2 * self.c.log_sigma, do_sample=do_sample).exp()
regularized_scale_sample = (c_sample * scale_sample) / (c_sample + scale_sample)
# sample preactivations
mu_activations = F.linear(x, self.weights, self.bias)
var_activations = F.linear(x.pow(2), self.weights_logvar.exp(), self.bias_logvar.exp())
activ_sample = reparam(mu_activations, var_activations.log(), do_sample=do_sample)
return torch.sqrt(regularized_scale_sample) * activ_sample
def kl(self):
return super(RegularizedHorseshoeLayer, self).kl() + self.c.kl()
class NodeSpecificRegularizedHorseshoeLayer(RegularizedHorseshoeLayer):
"""
Uses the regularized Horseshoe distribution. The regularized Horseshoe soft thresholds the tails of the Horseshoe.
For all weights w_k incident upon node k in the layer we have:
w_k ~ N(0, (tau_k * v)^2 I) N(0, c_k^2 I), c_k^2 ~ InverseGamma(a, b).
c_k^2 controls the scale of the thresholding. As c_k^2 -> infinity, the regularized Horseshoe -> Horseshoe
Note that we now have a per-node c_k.
"""
def __init__(self, in_features, out_features, cuda=False, scale=1., c_a=2., c_b=6.):
super(NodeSpecificRegularizedHorseshoeLayer, self).__init__(in_features, out_features, cuda=cuda, scale=scale)
self.c = InvGammaLayer(a=c_a, b=c_b, out_features=out_features)
<s> import numpy as np
import torch
from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseFixedPrecision
def compute_test_ll(y_test, y_pred_samples, std_y=1.):
"""
    Computes test log likelihood = (1 / Ntest) * \\sum_n ln p(y_n | x_n, D_train)
:param y_test: True y
:param y_pred_samples: y^s = f(x_test, w^s); w^s ~ q(w). S x Ntest, where S is the number of samples
q(w) is either a trained variational posterior or an MCMC approximation to p(w | D_train)
:param std_y: True std of y (assumed known)
"""
S, _ = y_pred_samples.shape
noise = GaussianNoiseFixedPrecision(std_y=std_y)
    # noise.loss returns the negative log likelihood; negate to recover ln p per sample
    log_lik = -noise.loss(y_pred=y_pred_samples, y_true=y_test.unsqueeze(dim=0), reduce_sum=False)
    ll = torch.logsumexp(log_lik, dim=0) - np.log(S)  # log-mean-exp over the S samples
return torch.mean(ll) # mean over test points
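# Added illustration: evaluating test log likelihood on synthetic
# posterior-predictive samples. Relies on the reduce_sum flag of the noise
# model's loss, as used above. Not part of the original library code.
def _example_compute_test_ll():
    torch.manual_seed(0)
    n_test, n_samples = 20, 50
    y_test = torch.randn(n_test)
    # Fake posterior samples of shape S x Ntest, centred on the truth.
    y_pred_samples = y_test.unsqueeze(0) + 0.1 * torch.randn(n_samples, n_test)
    return compute_test_ll(y_test, y_pred_samples, std_y=1.)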
<s> from abc import ABC
import torch
from torch import nn
from uq360.models.bayesian_neural_networks.layers import BayesianLinearLayer
from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseGammaPrecision
import numpy as np
td = torch.distributions
class BayesianNN(nn.Module, ABC):
"""
Bayesian neural network with zero mean Gaussian priors over weights.
"""
def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50,
activation_type='relu', num_layers=1):
super(BayesianNN, self).__init__()
self.num_layers = num_layers
if activation_type == 'relu':
# activation
self.activation = nn.ReLU()
elif activation_type == 'tanh':
self.activation = nn.Tanh()
        else:
            raise ValueError("Activation type '{}' not supported".format(activation_type))
        # ModuleList registers hidden-layer parameters with the module
        self.fc_hidden = nn.ModuleList()
self.fc1 = layer(ip_dim, num_nodes,)
for _ in np.arange(self.num_layers - 1):
self.fc_hidden.append(layer(num_nodes, num_nodes, ))
self.fc_out = layer(num_nodes, op_dim, )
self.noise_layer = None
def forward(self, x, do_sample=True):
x = self.fc1(x, do_sample=do_sample)
x = self.activation(x)
for layer in self.fc_hidden:
x = layer(x, do_sample=do_sample)
x = self.activation(x)
return self.fc_out(x, do_sample=do_sample, scale_variances=True)
def kl_divergence_w(self):
kld = self.fc1.kl() + self.fc_out.kl()
for layer in self.fc_hidden:
kld += layer.kl()
return kld
def prior_predictive_samples(self, n_sample=100):
n_eval = 1000
x = torch.linspace(-2, 2, n_eval)[:, np.newaxis]
y = np.zeros([n_sample, n_eval])
for i in np.arange(n_sample):
y[i] = self.forward(x).data.numpy().ravel()
return x.data.numpy(), y
### get and set weights ###
def get_weights(self):
assert len(self.fc_hidden) == 0 # only works for one layer networks.
weight_dict = {}
weight_dict['layerip_means'] = torch.cat([self.fc1.weights, self.fc1.bias.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerip_logvar'] = torch.cat([self.fc1.weights_logvar, self.fc1.bias_logvar.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerop_means'] = torch.cat([self.fc_out.weights, self.fc_out.bias.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerop_logvar'] = torch.cat([self.fc_out.weights_logvar, self.fc_out.bias_logvar.unsqueeze(1)], dim=1).data.numpy()
return weight_dict
def set_weights(self, weight_dict):
assert len(self.fc_hidden) == 0 # only works for one layer networks.
to_param = lambda x: nn.Parameter(torch.Tensor(x))
        self.fc1.weights = to_param(weight_dict['layerip_means'][:, :-1])
        self.fc1.weights_logvar = to_param(weight_dict['layerip_logvar'][:, :-1])
        self.fc1.bias = to_param(weight_dict['layerip_means'][:, -1])
        self.fc1.bias_logvar = to_param(weight_dict['layerip_logvar'][:, -1])
        self.fc_out.weights = to_param(weight_dict['layerop_means'][:, :-1])
        self.fc_out.weights_logvar = to_param(weight_dict['layerop_logvar'][:, :-1])
self.fc_out.bias = to_param(weight_dict['layerop_means'][:, -1])
self.fc_out.bias_logvar = to_param(weight_dict['layerop_logvar'][:, -1])
class BayesianRegressionNet(BayesianNN, ABC):
"""
Bayesian neural net with N(y_true | f(x, w), \\lambda^-1); \\lambda ~ Gamma(a, b) likelihoods.
"""
def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu',
num_layers=1):
super(BayesianRegressionNet, self).__init__(layer=layer, ip_dim=ip_dim, op_dim=op_dim,
num_nodes=num_nodes, activation_type=activation_type,
num_layers=num_layers,
)
self.noise_layer = GaussianNoiseGammaPrecision(a0=6., b0=6.)
def likelihood(self, x=None, y=None):
out = self.forward(x)
return -self.noise_layer.loss(y_pred=out, y_true=y)
def neg_elbo(self, num_batches, x=None, y=None):
        # scale the KL terms by number of batches so that the minibatch elbo is an unbiased estimate of the true elbo.
Elik = self.likelihood(x, y)
neg_elbo = (self.kl_divergence_w() + self.noise_layer.kl()) / num_batches - Elik
return neg_elbo
def mse(self, x, y):
"""
        scaled sum of squared errors (scaled by 0.5 * E[noise precision])
"""
E_noise_precision = 1. / self.noise_layer.get_noise_var()
return (0.5 * E_noise_precision * (self.forward(x, do_sample=False) - y)**2).sum()
def get_noise_var(self):
return self.noise_layer.get_noise_var()
class BayesianClassificationNet(BayesianNN, ABC):
"""
Bayesian neural net with Categorical(y_true | f(x, w)) likelihoods. Use for classification.
"""
def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu',
num_layers=1):
super(BayesianClassificationNet, self).__init__(layer=layer, ip_dim=ip_dim, op_dim=op_dim,
num_nodes=num_nodes, activation_type=activation_type,
num_layers=num_layers)
self.noise_layer = torch.nn.CrossEntropyLoss(reduction='sum')
def likelihood(self, x=None, y=None):
out = self.forward(x)
return -self.noise_layer(out, y)
def neg_elbo(self, num_batches, x=None, y=None):
        # scale the KL terms by number of batches so that the minibatch elbo is an unbiased estimate of the true elbo.
Elik = self.likelihood(x, y)
neg_elbo = self.kl_divergence_w() / num_batches - Elik
return neg_elbo
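# Added illustration: one variational-inference step on synthetic data using
# the regression net above. Layer implementations come from the imported
# uq360 modules; sizes and optimizer settings are arbitrary. Not part of the
# original library code.
def _example_bayesian_regression_step():
    torch.manual_seed(0)
    x = torch.randn(32, 1)
    y = torch.sin(x)
    net = BayesianRegressionNet(ip_dim=1, op_dim=1, num_nodes=20, num_layers=1)
    opt = torch.optim.Adam(net.parameters(), lr=1e-3)
    loss = net.neg_elbo(num_batches=1, x=x, y=y)  # KL(q||p) - E_q[log lik]
    opt.zero_grad()
    loss.backward()
    opt.step()
    return loss.item()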
<s> from abc import ABC
import numpy as np
import torch
from torch import nn
from uq360.models.bayesian_neural_networks.layers import HorseshoeLayer, BayesianLinearLayer, RegularizedHorseshoeLayer
from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseGammaPrecision
import numpy as np
td = torch.distributions
class HshoeBNN(nn.Module, ABC):
"""
Bayesian neural network with Horseshoe layers.
"""
def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu', num_layers=1,
hshoe_scale=1e-1, use_reg_hshoe=False):
if use_reg_hshoe:
layer = RegularizedHorseshoeLayer
else:
layer = HorseshoeLayer
super(HshoeBNN, self).__init__()
self.num_layers = num_layers
if activation_type == 'relu':
# activation
self.activation = nn.ReLU()
elif activation_type == 'tanh':
self.activation = nn.Tanh()
        else:
            raise ValueError("Activation type '{}' not supported".format(activation_type))
        # ModuleList registers hidden-layer parameters with the module
        self.fc_hidden = nn.ModuleList()
self.fc1 = layer(ip_dim, num_nodes, scale=hshoe_scale)
for _ in np.arange(self.num_layers - 1):
self.fc_hidden.append(layer(num_nodes, num_nodes))
self.fc_out = BayesianLinearLayer(num_nodes, op_dim)
self.noise_layer = None
def forward(self, x, do_sample=True):
x = self.fc1(x, do_sample=do_sample)
x = self.activation(x)
for layer in self.fc_hidden:
x = layer(x, do_sample=do_sample)
x = self.activation(x)
return self.fc_out(x, do_sample=do_sample, scale_variances=True)
def kl_divergence_w(self):
kld = self.fc1.kl() + self.fc_out.kl()
for layer in self.fc_hidden:
kld += layer.kl()
return kld
def fixed_point_updates(self):
if hasattr(self.fc1, 'fixed_point_updates'):
self.fc1.fixed_point_updates()
if hasattr(self.fc_out, 'fixed_point_updates'):
self.fc_out.fixed_point_updates()
for layer in self.fc_hidden:
if hasattr(layer, 'fixed_point_updates'):
layer.fixed_point_updates()
def prior_predictive_samples(self, n_sample=100):
n_eval = 1000
x = torch.linspace(-2, 2, n_eval)[:, np.newaxis]
y = np.zeros([n_sample, n_eval])
for i in np.arange(n_sample):
y[i] = self.forward(x).data.numpy().ravel()
return x.data.numpy(), y
### get and set weights ###
def get_weights(self):
assert len(self.fc_hidden) == 0 # only works for one layer networks.
weight_dict = {}
weight_dict['layerip_means'] = torch.cat([self.fc1.weights, self.fc1.bias.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerip_logvar'] = torch.cat([self.fc1.weights_logvar, self.fc1.bias_logvar.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerop_means'] = torch.cat([self.fc_out.weights, self.fc_out.bias.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerop_logvar'] = torch.cat([self.fc_out.weights_logvar, self.fc_out.bias_logvar.unsqueeze(1)], dim=1).data.numpy()
return weight_dict
def set_weights(self, weight_dict):
assert len(self.fc_hidden) == 0 # only works for one layer networks.
to_param = lambda x: nn.Parameter(torch.Tensor(x))
        self.fc1.weights = to_param(weight_dict['layerip_means'][:, :-1])
        self.fc1.weights_logvar = to_param(weight_dict['layerip_logvar'][:, :-1])
        self.fc1.bias = to_param(weight_dict['layerip_means'][:, -1])
        self.fc1.bias_logvar = to_param(weight_dict['layerip_logvar'][:, -1])
        self.fc_out.weights = to_param(weight_dict['layerop_means'][:, :-1])
        self.fc_out.weights_logvar = to_param(weight_dict['layerop_logvar'][:, :-1])
self.fc_out.bias = to_param(weight_dict['layerop_means'][:, -1])
self.fc_out.bias_logvar = to_param(weight_dict['layerop_logvar'][:, -1])
class HshoeRegressionNet(HshoeBNN, ABC):
"""
Horseshoe net with N(y_true | f(x, w), \\lambda^-1); \\lambda ~ Gamma(a, b) likelihoods.
"""
def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu',
num_layers=1, hshoe_scale=1e-5, use_reg_hshoe=False):
super(HshoeRegressionNet, self).__init__(ip_dim=ip_dim, op_dim=op_dim,
num_nodes=num_nodes, activation_type=activation_type,
num_layers=num_layers,
hshoe_scale=hshoe_scale,
use_reg_hshoe=use_reg_hshoe)
self.noise_layer = GaussianNoiseGammaPrecision(a0=6., b0=6.)
def likelihood(self, x=None, y=None):
out = self.forward(x)
return -self.noise_layer.loss(y_pred=out, y_true=y)
def neg_elbo(self, num_batches, x=None, y=None):
        # scale the KL terms by number of batches so that the minibatch elbo is an unbiased estimate of the true elbo.
Elik = self.likelihood(x, y)
neg_elbo = (self.kl_divergence_w() + self.noise_layer.kl()) / num_batches - Elik
return neg_elbo
def mse(self, x, y):
"""
        scaled sum of squared errors (scaled by 0.5 * E[noise precision])
"""
E_noise_precision = 1. / self.noise_layer.get_noise_var()
return (0.5 * E_noise_precision * (self.forward(x, do_sample=False) - y)**2).sum()
def get_noise_var(self):
return self.noise_layer.get_noise_var()
class HshoeClassificationNet(HshoeBNN, ABC):
"""
Horseshoe net with Categorical(y_true | f(x, w)) likelihoods. Use for classification.
"""
def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu',
num_layers=1, hshoe_scale=1e-5, use_reg_hshoe=False):
super(HshoeClassificationNet, self).__init__(ip_dim=ip_dim, op_dim=op_dim,
num_nodes=num_nodes, activation_type=activation_type,
num_layers=num_layers,
hshoe_scale=hshoe_scale,
use_reg_hshoe=use_reg_hshoe)
self.noise_layer = torch.nn.CrossEntropyLoss(reduction='sum')
def likelihood(self, x=None, y=None):
out = self.forward(x)
return -self.noise_layer(out, y)
def neg_elbo(self, num_batches, x=None, y=None):
        # scale the KL terms by number of batches so that the minibatch elbo is an unbiased estimate of the true elbo.
Elik = self.likelihood(x, y)
neg_elbo = (self.kl_divergence_w()) / num_batches - Elik
return neg_elbo
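# Added illustration: horseshoe nets additionally interleave gradient steps
# with conditional (fixed-point) updates of the scale hyperparameters. Sketch
# on synthetic data; not part of the original library code.
def _example_horseshoe_regression_step():
    torch.manual_seed(0)
    x = torch.randn(32, 1)
    y = torch.sin(x)
    net = HshoeRegressionNet(ip_dim=1, op_dim=1, num_nodes=20, hshoe_scale=1e-1)
    opt = torch.optim.Adam(net.parameters(), lr=1e-3)
    loss = net.neg_elbo(num_batches=1, x=x, y=y)
    opt.zero_grad()
    loss.backward()
    opt.step()
    net.fixed_point_updates()  # update inverse-gamma scale variables
    return loss.item()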
<s> import abc
import sys
# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta(str('ABC'), (), {})
class AbstractNoiseModel(ABC):
""" Abstract class. All noise models inherit from here.
"""
def __init__(self, *argv, **kwargs):
""" Initialize an AbstractNoiseModel object.
"""
@abc.abstractmethod
def loss(self, *argv, **kwargs):
""" Compute loss given predictions and groundtruth labels
"""
raise NotImplementedError
@abc.abstractmethod
def get_noise_var(self, *argv, **kwargs):
"""
Return the current estimate of noise variance
"""
raise NotImplementedError
<s> import math
import numpy as np
import torch
from scipy.special import gammaln
from uq360.models.noise_models.noisemodel import AbstractNoiseModel
from torch.nn import Parameter
td = torch.distributions
def transform(a):
return torch.log(1 + torch.exp(a))
class GaussianNoise(torch.nn.Module, AbstractNoiseModel):
"""
N(y_true | f_\\mu(x, w), f_\\sigma^2(x, w))
"""
def __init__(self, cuda=False):
super(GaussianNoise, self).__init__()
self.cuda = cuda
self.const = torch.log(torch.FloatTensor([2 * math.pi]))
def loss(self, y_true=None, mu_pred=None, log_var_pred=None, reduce_mean=True):
"""
computes -1 * ln N (y_true | mu_pred, softplus(log_var_pred))
:param y_true:
:param mu_pred:
:param log_var_pred:
:return:
"""
var_pred = transform(log_var_pred)
ll = -0.5 * self.const - 0.5 * torch.log(var_pred) - 0.5 * (1. / var_pred) * ((mu_pred - y_true) ** 2)
if reduce_mean:
return -ll.mean(dim=0)
else:
return -ll.sum(dim=0)
def get_noise_var(self, log_var_pred):
return transform(log_var_pred)
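# Added illustration: heteroscedastic negative log likelihood where a model
# predicts a mean and an unconstrained log-variance per sample (the variance
# is recovered via softplus inside the loss). Synthetic tensors; not part of
# the original library code.
def _example_gaussian_noise_loss():
    torch.manual_seed(0)
    y_true = torch.randn(16)
    mu_pred = y_true + 0.1 * torch.randn(16)
    log_var_pred = torch.zeros(16)  # softplus(0) ~ 0.693 variance
    noise = GaussianNoise()
    return noise.loss(y_true=y_true, mu_pred=mu_pred, log_var_pred=log_var_pred)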
<s> import math
import numpy as np
import torch
from scipy.special import gammaln
from uq360.models.noise_models.noisemodel import AbstractNoiseModel
from torch.nn import Parameter
td = torch.distributions
def transform(a):
return torch.log(1 + torch.exp(a))
class GaussianNoiseGammaPrecision(torch.nn.Module, AbstractNoiseModel):
"""
N(y_true | f(x, w), \\lambda^-1); \\lambda ~ Gamma(a, b).
Uses a variational approximation; q(lambda) = Gamma(ahat, bhat)
"""
def __init__(self, a0=6, b0=6, cuda=False):
super(GaussianNoiseGammaPrecision, self).__init__()
self.cuda = cuda
self.a0 = a0
self.b0 = b0
self.const = torch.log(torch.FloatTensor([2 * math.pi]))
# variational parameters
self.ahat = Parameter(torch.FloatTensor([10.]))
self.bhat = Parameter(torch.FloatTensor([3.]))
def loss(self, y_pred=None, y_true=None):
"""
computes -1 * E_q(\\lambda)[ln N (y_pred | y_true, \\lambda^-1)], where q(lambda) = Gamma(ahat, bhat)
:param y_pred:
:param y_true:
:return:
"""
n = y_pred.shape[0]
ahat = transform(self.ahat)
bhat = transform(self.bhat)
return -1 * (-0.5 * n * self.const + 0.5 * n * (torch.digamma(ahat) - torch.log(bhat)) \\
- 0.5 * (ahat/bhat) * ((y_pred - y_true) ** 2).sum())
def kl(self):
ahat = transform(self.ahat)
bhat = transform(self.bhat)
return (ahat - self.a0) * torch.digamma(ahat) - torch.lgamma(ahat) + gammaln(self.a0) + \\
self.a0 * (torch.log(bhat) - np.log(self.b0)) + ahat * (self.b0 - bhat) / bhat
def get_noise_var(self):
ahat = transform(self.ahat)
bhat = transform(self.bhat)
return (bhat / ahat).data.numpy()[0]
class GaussianNoiseFixedPrecision(torch.nn.Module, AbstractNoiseModel):
"""
N(y_true | f(x, w), sigma_y**2); known sigma_y
"""
def __init__(self, std_y=1., cuda=False):
super(GaussianNoiseFixedPrecision, self).__init__()
self.cuda = cuda
self.const = torch.log(torch.FloatTensor([2 * math.pi]))
self.sigma_y = std_y
    def loss(self, y_pred=None, y_true=None, reduce_sum=True):
        """
        computes -1 * ln N (y_pred | y_true, sigma_y**2)
        :param y_pred:
        :param y_true:
        :param reduce_sum: if False, return the per-element negative log likelihood
        :return:
        """
        ll = -0.5 * self.const - np.log(self.sigma_y) - 0.5 * (1. / self.sigma_y ** 2) * ((y_pred - y_true) ** 2)
        if reduce_sum:
            return -ll.sum(dim=0)
        return -ll
def get_noise_var(self):
        return self.sigma_y ** 2
<s> import pandas as pd
tab = ' '
VALID_AGGREGATION_METHODS = ['mean','sum']
VALID_GRANULARITY_UNITS = ['second','minute','hour','day','week','month','year']
VALID_INTERPOLATE_KWARGS = {'linear':{},'spline':{'order':5},'timebased':{}}
VALID_INTERPOLATE_METHODS = list( VALID_INTERPOLATE_KWARGS.keys())
def get_one_true_option(d, default_value=None):
if isinstance(d, dict):
for k,v in d.items():
            if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
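# Added illustration of the helpers above (values are hypothetical):
#   get_one_true_option({'mean': 'True', 'sum': 'False'})        -> 'mean'
#   get_one_true_option({'mean': False}, default_value='sum')    -> 'sum'
#   get_boolean('true') -> True;  get_boolean(0) -> False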
def get_boolean(value):
if (isinstance(value, str) and value.lower() == 'true') or (isinstance(value, bool) and value == True):
return True
else:
return False
def get_source_delta( data: pd.DataFrame):
    MAX_SAMPLE_TRY = 20
    if len( data) <= 1:
        return None
    count = {}
    # sample up to MAX_SAMPLE_TRY consecutive index deltas and count occurrences;
    # bounding by len(data) - 1 avoids indexing past the end of the frame
    for i in range(min(MAX_SAMPLE_TRY, len(data) - 1)):
        delta = data.index[i+1] - data.index[i]
        if delta not in count.keys():
            count[delta] = 1
        else:
            count[delta] += 1
if count:
return max(count, key=count.get)
else:
return None
class timeSeries():
def __init__( self, config, datetime, log=None):
self.datetime = datetime
self.validate_config(config)
self.log = log
def validate_config( self, config):
if not self.datetime or self.datetime.lower() == 'na':
raise ValueError('date time feature is not provided')
self.config = {}
method = get_one_true_option(config.get('interpolation',None))
self.config['interpolate'] = {}
self.config['interpolate']['enabled'] = method in VALID_INTERPOLATE_METHODS
self.config['interpolate']['method'] = method
self.config['rolling'] = {}
self.config['rolling']['enabled'] = get_boolean( config.get('rollingWindow',False))
self.config['rolling']['size'] = int( config.get('rollingWindowSize',1))
if self.config['rolling']['size'] < 1:
raise ValueError('Rolling window size should be greater than 0.')
        self.config['aggregation'] = {}
        aggregation = config.get('aggregation',{})
        agg_method = get_one_true_option( aggregation.get('type',{}))
        self.config['aggregation']['enabled'] = agg_method in VALID_AGGREGATION_METHODS
        self.config['aggregation']['method'] = agg_method
granularity = aggregation.get('granularity',{})
granularity_unit = get_one_true_option( granularity.get('unit',None))
if granularity_unit in VALID_GRANULARITY_UNITS:
granularity_mapping = {'second':'S','minute':'Min','hour':'H','day':'D','week':'W','month':'M','year':'Y'}
size = int(granularity.get('size',10))
granularity_unit = granularity_mapping.get(granularity_unit,granularity_unit)
self.config['aggregation']['granularity'] = {}
self.config['aggregation']['granularity']['unit'] = granularity_unit
self.config['aggregation']['granularity']['size'] = size
def log_info(self, msg, type='info'):
if self.log:
if type == 'error':
self.log.error( msg)
else:
self.log.info( msg)
else:
print( msg)
def is_down_sampling(self, data, size, granularity_unit):
down_sampling = False
if granularity_unit in ['M', 'Y']:
return True
else:
target_delta = pd.Timedelta(size , granularity_unit)
source_delta = get_source_delta(data)
if not source_delta:
raise ValueError('Could not find the data frame time frequency')
return source_delta < target_delta
def run( self, data):
if self.datetime not in data.columns:
raise ValueError(f"Date time feature '{self.datetime}' is not present in data")
try:
# data[self.datetime] = pd.to_datetime( data[self.datetime])
##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle.
try:
#for non utc timestamp
data[self.datetime] = pd.to_datetime( data[self.datetime])
except:
#for utc timestamp
data[self.datetime] = pd.to_datetime( data[self.datetime],utc=True)
data.set_index( self.datetime, inplace=True)
except:
raise ValueError(f"can not convert '{self.datetime}' to dateTime")
if self.config.get('interpolate',{}).get('enabled',False):
method = self.config['interpolate']['method']
self.log_info(f"Applying Interpolation using {method}")
methods_mapping = {'timebased': 'time'}
self.config['interpolate']['mapped_method'] = methods_mapping.get(method, method)
data.interpolate(method=self.config['interpolate']['mapped_method'], inplace=True, **VALID_INTERPOLATE_KWARGS[method])
if self.config.get('rolling',{}).get('enabled',False):
if self.config['rolling']['size'] > len( data):
raise ValueError('Rolling window size is greater than dataset size')
self.log_info(f"Applying rolling window( moving avg) with size {self.config['rolling']['size']}")
data = data.rolling( self.config['rolling']['size']).mean()
data = data.iloc[self.config['rolling']['size'] - 1:]
aggregation = self.config.get('aggregation',{})
if aggregation.get('enabled',False):
method = aggregation.get('method','mean')
self.rule = str(aggregation['granularity']['size']) + aggregation['granularity']['unit']
if self.is_down_sampling(data, aggregation['granularity']['size'], aggregation['granularity']['unit']):
self.log_info(f"Applying down sampling( {self.rule})")
if method == 'mean':
data = data.resample( self.rule).mean()
elif method == 'sum':
data = data.resample( self.rule).sum()
else:
self.log_info(f"Applying up sampling using forward fill method( {self.rule})")
data = data.resample( self.rule).ffill()
data.reset_index( inplace=True, names=self.datetime)
return data
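    # Added illustration: the config shape expected by validate_config() and a
    # minimal run. All keys/values below are hypothetical examples:
    #
    #     config = {
    #         'interpolation': {'linear': 'True'},
    #         'rollingWindow': 'False',
    #         'aggregation': {
    #             'type': {'mean': 'True'},
    #             'granularity': {'unit': {'hour': 'True'}, 'size': 1},
    #         },
    #     }
    #     df = pd.DataFrame({'ts': pd.date_range('2023-01-01', periods=10, freq='15Min'),
    #                        'value': range(10)})
    #     df = timeSeries(config, datetime='ts').run(df)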
def get_code(self, indent=0):
tab = ' '
code = ''
code += f"""
def preprocess( data):
try:
#for non utc timestamp
data['{self.datetime}'] = pd.to_datetime( data['{self.datetime}'])
except:
data['{self.datetime}'] = pd.to_datetime( data['{self.datetime}'],utc=True)
data.set_index( '{self.datetime}', inplace=True)
"""
if self.config.get('interpolate',{}).get('enabled',False):
code += tab + f"data.interpolate(method='{self.config['interpolate']['mapped_method']}', inplace=True, **{VALID_INTERPOLATE_KWARGS[self.config['interpolate']['method']]})\\n"
if self.config.get('rolling',{}).get('enabled',False):
code += tab + f"data = data.rolling( {self.config['rolling']['size']}).mean().iloc[{self.config['rolling']['size'] - 1}:]\\n"
if self.config.get('aggregation',{}).get('enabled',False):
code += tab + f"data = data.resample( '{self.rule}').{self.config.get('aggregation',{}).get('method','mean')}()\\n"
code += tab + f"data.reset_index( inplace=True, names='{self.datetime}')\\n"
code += tab + "return data\\n"
return code
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import numpy as np
import pandas as pd
import sys
import os
import warnings
import logging
from pathlib import Path
import random
from sklearn.model_selection import train_test_split
import operator
import re
import pdfplumber
class dataReader():
def __init__(self):
self.dataDf =None
self.log = logging.getLogger('eion')
def readCsv(self,dataPath,featureList,targetColumn):
data=pd.read_csv(dataPath)
dataDf=data[featureList]
predictDf=data[targetColumn]
return dataDf,predictDf
def rowsfilter(self,filters,dataframe):
self.log.info('\\n-------> No of rows before filtering: '+str(dataframe.shape[0])) #task-13479
filterexpression=''
firstexpressiondone = False
for x in filters:
if firstexpressiondone:
filterexpression += ' '
if x['combineOperator'].lower() == 'and':
filterexpression += '&'
elif x['combineOperator'].lower() == 'or':
filterexpression += '|'
filterexpression += ' '
firstexpressiondone = True
filterexpression += x['feature']
filterexpression += ' '
if x['condition'].lower() == 'equals':
filterexpression += '=='
elif x['condition'].lower() == 'notequals':
filterexpression += '!='
elif x['condition'].lower() == 'lessthan':
filterexpression += '<'
elif x['condition'].lower() == 'lessthanequalto':
filterexpression += '<='
elif x['condition'].lower() == 'greaterthan':
filterexpression += '>'
elif x['condition'].lower() == 'greaterthanequalto':
filterexpression += '>='
filterexpression += ' '
if dataframe[x['feature']].dtype in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']:
filterexpression += x['value']
else:
filterexpression += '\\''+x['value']+'\\''
dataframe = dataframe.query(filterexpression)
self.log.info('-------> Row filter: '+str(filterexpression)) #task-13479
self.log.info('-------> No of rows after filtering: '+str(dataframe.shape[0]))
return dataframe,filterexpression
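    # Added illustration: the filter spec consumed by rowsfilter(). Field names
    # follow the checks above; feature names and values are hypothetical:
    #
    #     filters = [
    #         {'feature': 'age', 'condition': 'greaterthan', 'value': '30',
    #          'combineOperator': 'and'},
    #         {'feature': 'city', 'condition': 'equals', 'value': 'Delhi',
    #          'combineOperator': 'and'},
    #     ]
    #     df, expr = dataReader().rowsfilter(filters, df)
    #     # expr -> "age > 30 & city == 'Delhi'" (numeric values left unquoted)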
def grouping(self,grouper,dataframe):
grouperbyjson= {}
groupbyfeatures = grouper['groupby']
dataframe = dataframe.reset_index()
features = dataframe.columns.tolist()
aggjson = {}
for feature, featureType in zip(features,dataframe.dtypes):
if feature == groupbyfeatures or feature == 'index':
continue
if dataframe[feature].empty == True:
continue
if dataframe[feature].isnull().all() == True:
continue
if featureType in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']:
temp = {}
temp[feature+'_size'] = 'size'
temp[feature+'_sum'] = 'sum'
temp[feature+'_max'] = 'max'
temp[feature+'_min'] = 'min'
temp[feature+'_mean'] = 'mean'
aggjson[feature] = temp
else:
temp = {}
temp[feature+'_size'] = 'size'
temp[feature+'_unique'] = 'nunique'
aggjson[feature] = temp
groupbystring = 'groupby([\\''+groupbyfeatures+'\\']).agg('+str(aggjson)+')'
grouperbyjson['groupbystring'] = groupbystring
dataframe = dataframe.groupby([groupbyfeatures]).agg(aggjson)
dataframe.columns = dataframe.columns.droplevel(0)
dataframe = dataframe.reset_index()
'''
if operation.lower() == 'size':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).size()
elif operation.lower() == 'mean':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).mean()
elif operation.lower() == 'max':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).max()
elif operation.lower() == 'min':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).min()
dataframe = dataframe.rename("groupby_value")
dataframe = dataframe.to_frame()
dataframe = dataframe.reset_index()
'''
return dataframe,grouperbyjson
def timeGrouping(self,timegrouper,dataframe):
grouperbyjson= {}
dateTime = timegrouper['dateTime']
frequency = timegrouper['freq']
groupbyfeatures = timegrouper['groupby']
grouperbyjson['datetime'] = dateTime
if dataframe[dateTime].dtypes in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']:
            dtlength = dataframe[dateTime].iloc[0]
            dtlength = np.int64(dtlength)
            dtlength = len(str(dtlength))
            if dtlength == 13:
                dataframe['date'] = pd.to_datetime(dataframe[dateTime],unit='ms')
                grouperbyjson['unit'] = 'ms'
            elif dtlength == 10:
dataframe['date'] = pd.to_datetime(dataframe[dateTime],unit='s')
grouperbyjson['unit'] = 's'
else:
dataframe['date'] = pd.to_datetime(dataframe[dateTime])
grouperbyjson['unit'] = ''
else:
dataframe['date'] = pd.to_datetime(dataframe[dateTime])
grouperbyjson['unit'] = ''
dataframe = dataframe.reset_index()
dataframe.set_index('date',inplace=True)
features = dataframe.columns.tolist()
aggjson = {}
for feature, featureType in zip(features,dataframe.dtypes):
if feature == groupbyfeatures or feature == dateTime or feature == 'index':
continue
if dataframe[feature].empty == True:
continue
if dataframe[feature].isnull().all() == True:
continue
if featureType in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']:
temp = {'size','sum','max','min','mean'}
aggjson[feature] = temp
else:
temp = {'size','nunique'}
aggjson[feature] = temp
if groupbyfeatures == '':
groupbystring = 'groupby([pd.Grouper(freq=\\''+frequency+'\\')]).agg('+str(aggjson)+')'
else:
groupbystring = 'groupby([pd.Grouper(freq=\\''+frequency+'\\'),\\''+groupbyfeatures+'\\']).agg('+str(aggjson)+')'
grouperbyjson['groupbystring'] = groupbystring
print(grouperbyjson)
if groupbyfeatures == '':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency)]).agg(aggjson)
else:
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).agg(aggjson)
dataframe.columns = ['_'.join(col) for col in dataframe.columns]
dataframe = dataframe.reset_index()
self.log.info(dataframe.head(10))
'''
if operation.lower() == 'size':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).size()
elif operation.lower() == 'mean':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).mean()
elif operation.lower() == 'max':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).max()
elif operation.lower() == 'min':
dataframe = dataframe.groupby([pd.Grouper(freq=frequency),groupbyfeatures]).min()
dataframe = dataframe.rename("groupby_value")
dataframe = dataframe.to_frame()
dataframe = dataframe.reset_index()
'''
return dataframe,grouperbyjson
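# Note on the epoch heuristic above: a 13-digit integer timestamp is treated as
# milliseconds (e.g. 1609459200000 -> 2021-01-01 00:00:00 with unit='ms') and a
# 10-digit one as seconds (1609459200 -> the same instant with unit='s'); any
# other width falls back to pandas' default to_datetime parsing.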
def readDf(self,dataF,featureList,targetColumn):
dataDf = dataF[featureList]
predictDf =dataF[targetColumn]
return dataDf,predictDf
def csvTodf(self,dataPath,delimiter,textqualifier):
'''
if os.path.splitext(dataPath)[1] == ".tsv":
dataFrame=pd.read_csv(dataPath,encoding='latin1',sep='\\t')
else:
dataFrame=pd.read_csv(dataPath,encoding='latin1')
'''
if os.path.splitext(dataPath)[1] == ".py":
f = open(dataPath, "r")
pythoncode = f.read()
f.close()
ldict = {}
exec(pythoncode, globals(), ldict)
dataFrame = ldict['dfpy']
else:
dataFrame=pd.read_csv(dataPath,encoding='utf-8',sep=delimiter,quotechar=textqualifier, skipinitialspace = True,na_values=['-','?'],encoding_errors= 'replace')
dataFrame.rename(columns=lambda x: x.strip(), inplace=True)
return dataFrame
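# Illustrative note: when dataPath ends in '.py', the script is executed and must
# assign the resulting frame to a variable named 'dfpy'. A minimal (hypothetical)
# data script could contain:
# import pandas as pd
# dfpy = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']})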
def read_file(self, fileName):
fileName = Path(fileName)
if fileName.suffix == '.pdf':
pdf = pdfplumber.open(fileName)
text = ''
for index, page in enumerate(pdf.pages):
if index:
text += ' '
text += page.extract_text()
else:
with open(fileName, "r",encoding="utf-8") as f:
text = f.read()
return text
def documentsTodf(self,folderlocation,labelFilePath):
dataDf = pd.DataFrame()
error_message = ""
dataset_csv_file = os.path.join(folderlocation,labelFilePath)
labels = pd.read_csv(dataset_csv_file)
dataDict = {}
keys = ["File","Label"]
for key in keys:
dataDict[key] = []
for i in range(len(labels)):
filename = os.path.join(folderlocation,labels.loc[i,"File"])
dataDict["File"].append(self.read_file(filename))
dataDict["Label"].append(labels.loc[i,"Label"])
dataDf = pd.DataFrame.from_dict(dataDict)
error_message = ""
return dataDf, error_message
def removeFeatures(self,df,datetimeFeature,indexFeature,modelFeatures,targetFeature):
self.log.info("\\n---------- Prepare Features ----------")
if(str(datetimeFeature).lower() != 'na'):
datetimeFeature = datetimeFeature.split(",")
datetimeFeature = list(map(str.strip, datetimeFeature))
for dtfeature in datetimeFeature:
if dtfeature in df.columns:
self.log.info("-------> Remove Date Time Feature: "+dtfeature)
df = df.drop(columns=dtfeature)
if(str(indexFeature).lower() != 'na'):
indexFeature = indexFeature.split(",")
indexFeature = list(map(str.strip, indexFeature))
for ifeature in indexFeature:
if ifeature in df.columns:
self.log.info("-------> Remove Index Feature: "+ifeature)
df = df.drop(columns=ifeature)
if(str(modelFeatures).lower() != 'na'):
self.log.info("-------> Model Features: "+str(modelFeatures))
modelFeatures = modelFeatures.split(",")
modelFeatures = list(map(str.strip, modelFeatures))
if(targetFeature != '' and str(targetFeature).lower() != 'na'):
targetFeature = targetFeature.split(",")
targetFeature = list(map(str.strip, targetFeature))
for ifeature in targetFeature:
if ifeature not in modelFeatures:
modelFeatures.append(ifeature)
if(str(indexFeature).lower() != 'na'):
for ifeature in indexFeature:
if ifeature in modelFeatures:
modelFeatures.remove(ifeature)
if(str(datetimeFeature).lower() != 'na'):
for dtfeature in datetimeFeature:
if dtfeature in modelFeatures:
modelFeatures.remove(dtfeature)
df = df[modelFeatures]
self.log.info("---------- Prepare Features End ----------")
return(df)
def splitImageDataset(self, df, ratio, modelType):
if modelType.lower() == "objectdetection":
images = df['File'].unique().tolist()
trainImages = random.sample(images, int(len(images) * ratio))
mask = [0] * len(df)
for i in range(len(df)):
mask[i] = df.iloc[i]['File'] in trainImages
trainDf = df.iloc[mask]
testDf = df.iloc[[not elem for elem in mask]]
return trainDf, testDf
else:
return train_test_split(df, test_size=(1 - ratio))
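# Illustrative note: for object detection the split above samples whole image
# files, so every bounding-box row of a sampled image lands in the same split;
# e.g. with ratio=0.8 and 10 unique files, 8 files (and all of their rows) go to
# trainDf and the remaining 2 files to testDf.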
def createTFRecord(self, train_image_dir, output_dir, csv_file, testPercentage, AugEnabled,keepAugImages,operations, modelType,augConf={}):
from transformations import generate_tfrecord
from transformations.imageAug import ImageAugmentation
if isinstance(csv_file, pd.DataFrame):
df = csv_file
else:
df = pd.read_csv(os.path.join(train_image_dir,csv_file))
labelmap_path, num_classes = generate_tfrecord.createLabelFile(df, output_dir)
train_df, test_df = self.splitImageDataset(df, testPercentage/100.0, modelType)
if AugEnabled:
augFile = os.path.join(output_dir,"tempTrainDf.csv")
train_df.to_csv(augFile)
ia = ImageAugmentation(train_image_dir, augFile)
augFile = ia.augment(modelType, operations,None,augConf)
train_df = pd.read_csv(augFile)
generate_tfrecord.generate_TF_record(train_image_dir, output_dir, train_df, test_df, labelmap_path)
if AugEnabled and not keepAugImages:
ia.removeAugmentedImages(train_df)
return train_df, num_classes
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
#System imports
import os
import sys
import json
import datetime,time,timeit
import itertools
import numpy as np
import pandas as pd
import math
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.preprocessing import PowerTransformer
import logging
class dataTransformer():
def __init__(self):
self.log = logging.getLogger('eion')
def startTransformer(self,df,features,target,transType):
scaler ='None'
if target in features:
features.remove(target)
transFeatures=features
transDfColumns=[]
dataframe=df[transFeatures]
#targetArray=np.array(df[target])
#targetArray.shape = (len(targetArray), 1)
self.log.info("Data Normalization has started")
if transType.lower() =='standardscaler':
scaler = StandardScaler().fit(dataframe)
transDf = scaler.transform(dataframe)
elif transType.lower() =='minmax':
scaler=MinMaxScaler().fit(dataframe)
transDf = scaler.transform(dataframe)
elif transType.lower() =='lognormal':
print(dataframe)
scaler = PowerTransformer(method='yeo-johnson', standardize=False).fit(dataframe)
transDf = scaler.transform(dataframe)
else:
self.log.info("Need to implement")
#features.append(target)
#scaledDf = pd.DataFrame(np.hstack((transDf, targetArray)),columns=features)
return transDf,features,scaler<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import io
import json
import logging
import pandas as pd
import sys
import numpy as np
from pathlib import Path
from word2number import w2n
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.impute import SimpleImputer, KNNImputer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import FunctionTransformer
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.preprocessing import PowerTransformer
from sklearn.compose import ColumnTransformer
from sklearn.base import TransformerMixin
from sklearn.ensemble import IsolationForest
from category_encoders import TargetEncoder
try:
import transformations.data_profiler_functions as cs
except:
import data_profiler_functions as cs
if 'AION' in sys.modules:
try:
from appbe.app_config import DEBUG_ENABLED
except:
DEBUG_ENABLED = False
else:
DEBUG_ENABLED = False
log_suffix = f'[{Path(__file__).stem}] '
class profiler():
def __init__(self, xtrain, ytrain=None, target=None, encode_target = False, config={}, keep_unprocessed=[],data_path=None,log=None):
if not isinstance(xtrain, pd.DataFrame):
raise ValueError(f'{log_suffix}supported data type is pandas.DataFrame but provided data is of {type(xtrain)} type')
if xtrain.empty:
raise ValueError(f'{log_suffix}Data frame is empty')
if target and target in xtrain.columns:
self.target = xtrain[target]
xtrain.drop(target, axis=1, inplace=True)
self.target_name = target
elif ytrain:
self.target = ytrain
self.target_name = 'target'
else:
self.target = pd.Series()
self.target_name = None
self.data_path = data_path
self.encode_target = encode_target
self.label_encoder = None
self.data = xtrain
self.keep_unprocessed = keep_unprocessed
self.colm_type = {}
for colm, infer_type in zip(self.data.columns, self.data.dtypes):
self.colm_type[colm] = infer_type
self.numeric_feature = []
self.cat_feature = []
self.text_feature = []
self.wordToNumericFeatures = []
self.added_features = []
self.pipeline = []
self.dropped_features = {}
self.train_features_type={}
self.__update_type()
self.config = config
self.featureDict = config.get('featureDict', [])
self.output_columns = []
self.feature_expender = []
self.text_to_num = {}
self.force_numeric_conv = []
if log:
self.log = log
else:
self.log = logging.getLogger('eion')
self.type_conversion = {}
self.log_input_feat_info()
def log_input_feat_info(self):
if self.featureDict:
feature_df = pd.DataFrame(self.featureDict)
log_text = '\\nPreprocessing options:'
log_text += '\\n\\t'+str(feature_df.head( len(self.featureDict))).replace('\\n','\\n\\t')
self.log.info(log_text)
def log_dataframe(self, msg=None):
buffer = io.StringIO()
self.data.info(buf=buffer)
if msg:
log_text = f'Data frame after {msg}:'
else:
log_text = 'Data frame:'
log_text += '\\n\\t'+str(self.data.head(2)).replace('\\n','\\n\\t')
log_text += ('\\n\\t' + buffer.getvalue().replace('\\n','\\n\\t'))
self.log.info(log_text)
def transform(self):
if self.is_target_available():
if self.target_name:
self.log.info(f"Target feature name: '{self.target_name}'")
self.log.info(f"Target feature size: {len(self.target)}")
else:
self.log.info(f"Target feature not present")
self.log_dataframe()
self.data.info()
try:
self.process()
except Exception as e:
self.log.error(e, exc_info=True)
raise
pipe = FeatureUnion(self.pipeline)
try:
if self.text_feature:
from text.textProfiler import set_pretrained_model
set_pretrained_model(pipe)
conversion_method = self.get_conversion_method()
process_data = pipe.fit_transform(self.data, y=self.target)
# save for testing
if DEBUG_ENABLED:
if not isinstance(process_data, np.ndarray):
process_data = process_data.toarray()
df = pd.DataFrame(process_data)
df.to_csv('debug_preprocessed.csv', index=False)
if self.text_feature and conversion_method == 'latentsemanticanalysis':
n_size = self.get_tf_idf_output_size( pipe)
dimensions = self.get_tf_idf_dimensions()
if n_size != dimensions:
dimensions = n_size
from sklearn.decomposition import TruncatedSVD
reducer = TruncatedSVD( n_components = dimensions)
reduced_data = reducer.fit_transform( process_data[:,-n_size:])
text_process_idx = [t[0] for t in pipe.transformer_list].index('text_process')
pipe.transformer_list[text_process_idx][1].steps.append(('feature_reducer',reducer))
if not isinstance(process_data, np.ndarray):
process_data = process_data.toarray()
process_data = np.concatenate((process_data[:,:-n_size], reduced_data), axis=1)
last_step = self.feature_expender.pop()
self.feature_expender.append({'feature_reducer':list(last_step.values())[0]})
except EOFError as e:
if "Compressed file ended before the end-of-stream marker was reached" in str(e):
raise EOFError('Pretrained model is not downloaded properly')
self.update_output_features_names(pipe)
if not isinstance(process_data, np.ndarray):
process_data = process_data.toarray()
df = pd.DataFrame(process_data, index=self.data.index, columns=self.output_columns)
if self.is_target_available() and self.target_name:
df[self.target_name] = self.target
if self.keep_unprocessed:
df[self.keep_unprocessed] = self.data[self.keep_unprocessed]
self.log_numerical_fill()
self.log_categorical_fill()
self.log_normalization()
return df, pipe, self.label_encoder
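# Illustrative usage (hypothetical column names, not part of the original code):
# prof = profiler(df, target='label', encode_target=True, config=config_dict)
# processed_df, fitted_pipe, label_enc = prof.transform()
# processed_df holds the transformed features plus the (optionally encoded) target,
# fitted_pipe is the sklearn FeatureUnion reusable at prediction time, and
# label_enc is the LabelEncoder (or None when the target was not encoded).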
def log_type_conversion(self):
if self.log:
self.log.info('----------- Inspecting Features -----------')
self.log.info('----------- Type Conversion -----------')
count = 0
for k, v in self.type_conversion.items():
if v[0] != v[1]:
self.log.info(f'-------> {k} -> from {v[0]} to {v[1]} : {v[2]}')
self.log.info('Status:- |... Feature inspection done')
def check_config(self):
removeDuplicate = self.config.get('removeDuplicate', False)
self.config['removeDuplicate'] = cs.get_boolean(removeDuplicate)
self.config['misValueRatio'] = float(self.config.get('misValueRatio', cs.default_config['misValueRatio']))
self.config['numericFeatureRatio'] = float(self.config.get('numericFeatureRatio', cs.default_config['numericFeatureRatio']))
self.config['categoryMaxLabel'] = int(self.config.get('categoryMaxLabel', cs.default_config['categoryMaxLabel']))
featureDict = self.config.get('featureDict', [])
if isinstance(featureDict, dict):
self.config['featureDict'] = []
if isinstance(featureDict, str):
self.config['featureDict'] = []
def process(self):
#remove duplicate not required at the time of prediction
self.check_config()
self.remove_constant_feature()
self.remove_empty_feature(self.config['misValueRatio'])
self.remove_index_features()
self.dropna()
if self.config['removeDuplicate']:
self.drop_duplicate()
#self.check_categorical_features()
#self.string_to_numeric()
self.process_target()
self.train_features_type = {k:v for k,v in zip(self.data.columns, self.data.dtypes)}
self.parse_process_step_config()
self.process_drop_fillna()
self.log_type_conversion()
self.update_num_fill_dict()
if DEBUG_ENABLED:
print(self.num_fill_method_dict)
self.update_cat_fill_dict()
self.create_pipeline()
self.text_pipeline(self.config)
self.apply_outlier()
if DEBUG_ENABLED:
self.log.info(self.process_method)
self.log.info(self.pipeline)
def is_target_available(self):
return (isinstance(self.target, pd.Series) and not self.target.empty) or len(self.target)
def process_target(self, operation='encode', arg=None):
if self.is_target_available():
# drop null values
self.__update_index( self.target.notna(), 'target')
if self.encode_target:
self.label_encoder = LabelEncoder()
self.target = self.label_encoder.fit_transform(self.target)
return self.label_encoder
return None
def is_target_column(self, column):
return column == self.target_name
def fill_default_steps(self):
num_fill_method = cs.get_one_true_option(self.config.get('numericalFillMethod',{}))
normalization_method = cs.get_one_true_option(self.config.get('normalization',{}),'none')
for colm in self.numeric_feature:
if num_fill_method:
self.fill_missing_value_method(colm, num_fill_method.lower())
if normalization_method:
self.fill_normalizer_method(colm, normalization_method.lower())
cat_fill_method = cs.get_one_true_option(self.config.get('categoricalFillMethod',{}))
cat_encode_method = cs.get_one_true_option(self.config.get('categoryEncoding',{}))
for colm in self.cat_feature:
if cat_fill_method:
self.fill_missing_value_method(colm, cat_fill_method.lower())
if cat_encode_method:
self.fill_encoder_value_method(colm, cat_encode_method.lower(), default=True)
def parse_process_step_config(self):
self.process_method = {}
user_provided_data_type = {}
for feat_conf in self.featureDict:
colm = feat_conf.get('feature', '')
if not self.is_target_column(colm):
if colm in self.data.columns:
user_provided_data_type[colm] = feat_conf['type']
if user_provided_data_type:
self.update_user_provided_type(user_provided_data_type)
self.fill_default_steps()
for feat_conf in self.featureDict:
colm = feat_conf.get('feature', '')
if not self.is_target_column(colm):
if colm in self.data.columns:
if feat_conf.get('fillMethod', None):
self.fill_missing_value_method(colm, feat_conf['fillMethod'].lower())
if feat_conf.get('categoryEncoding', None):
self.fill_encoder_value_method(colm, feat_conf['categoryEncoding'].lower())
if feat_conf.get('normalization', None):
self.fill_normalizer_method(colm, feat_conf['normalization'].lower())
if feat_conf.get('outlier', None):
self.fill_outlier_method(colm, feat_conf['outlier'].lower())
if feat_conf.get('outlierOperation', None):
self.fill_outlier_process(colm, feat_conf['outlierOperation'].lower())
def get_tf_idf_dimensions(self):
dim = cs.get_one_true_option(self.config.get('embeddingSize',{}).get('TF_IDF',{}), 'default')
return {'default': 300, '50d':50, '100d':100, '200d':200, '300d':300}[dim]
def get_tf_idf_output_size(self, pipe):
start_index = {}
for feat_expender in self.feature_expender:
if feat_expender:
step_name = list(feat_expender.keys())[0]
index = list(feat_expender.values())[0]
for transformer_step in pipe.transformer_list:
if transformer_step[1].steps[-1][0] in step_name:
start_index[index] = {transformer_step[1].steps[-1][0]: transformer_step[1].steps[-1][1].get_feature_names_out()}
if start_index:
for key,value in start_index.items():
for k,v in value.items():
if k == 'vectorizer':
return len(v)
return 0
def update_output_features_names(self, pipe):
columns = self.output_columns
start_index = {}
index_shifter = 0
for feat_expender in self.feature_expender:
if feat_expender:
step_name = list(feat_expender.keys())[0]
for key,value in start_index.items():
for k,v in value.items():
index_shifter += len(v)
index = list(feat_expender.values())[0]
for transformer_step in pipe.transformer_list:
if transformer_step[1].steps[-1][0] in step_name:
start_index[index + index_shifter] = {transformer_step[1].steps[-1][0]: transformer_step[1].steps[-1][1].get_feature_names_out()}
#print(start_index)
if start_index:
for key,value in start_index.items():
for k,v in value.items():
if k == 'vectorizer':
v = [f'{x}_vect' for x in v]
self.output_columns[key:key] = v
self.added_features = [*self.added_features, *v]
def text_pipeline(self, conf_json):
if self.text_feature:
from text.textProfiler import textProfiler
from text.textProfiler import textCombine
pipeList = []
text_pipe = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", self.text_feature)
], remainder="drop")),
("text_fillNa",SimpleImputer(strategy='constant', fill_value='')),
("merge_text_feature", textCombine())])
obj = textProfiler()
pipeList = obj.cleaner(conf_json, pipeList, self.data_path)
pipeList = obj.embedding(conf_json, pipeList)
last_step = "merge_text_feature"
for pipe_elem in pipeList:
text_pipe.steps.append((pipe_elem[0], pipe_elem[1]))
last_step = pipe_elem[0]
text_transformer = ('text_process', text_pipe)
self.pipeline.append(text_transformer)
self.feature_expender.append({last_step:len(self.output_columns)})
def create_pipeline(self):
num_pipe = {}
for k,v in self.num_fill_method_dict.items():
for k1,v1 in v.items():
if k1 and k1 != 'none':
num_pipe[f'{k}_{k1}'] = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", v1)
], remainder="drop")),
(k, self.get_num_imputer(k)),
(k1, self.get_num_scaler(k1))
])
else:
num_pipe[f'{k}_{k1}'] = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", v1)
], remainder="drop")),
(k, self.get_num_imputer(k))
])
self.output_columns.extend(v1)
cat_pipe = {}
for k,v in self.cat_fill_method_dict.items():
for k1,v1 in v.items():
cat_pipe[f'{k}_{k1}'] = Pipeline([
('selector', ColumnTransformer([
("selector", "passthrough", v1)
], remainder="drop")),
(k, self.get_cat_imputer(k)),
(k1, self.get_cat_encoder(k1))
])
if k1 not in ['onehotencoding']:
self.output_columns.extend(v1)
else:
self.feature_expender.append({k1:len(self.output_columns)})
for key, pipe in num_pipe.items():
self.pipeline.append((key, pipe))
for key, pipe in cat_pipe.items():
self.pipeline.append((key, pipe))
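# Illustrative example: for numeric columns ['age', 'fare'] imputed with 'mean'
# and scaled with 'minmax', create_pipeline registers a step equivalent to
# ('mean_minmax', Pipeline([
#     ('selector', ColumnTransformer([('selector', 'passthrough', ['age', 'fare'])], remainder='drop')),
#     ('mean', SimpleImputer(strategy='mean')),
#     ('minmax', MinMaxScaler())]))
# and appends those column names to self.output_columns in the same order.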
"Drop: feature during training but replace with zero during prediction "
def process_drop_fillna(self):
drop_column = []
if 'numFill' in self.process_method.keys():
for col, method in self.process_method['numFill'].items():
if method == 'drop':
self.process_method['numFill'][col] = 'zero'
drop_column.append(col)
if 'catFill' in self.process_method.keys():
for col, method in self.process_method['catFill'].items():
if method == 'drop':
self.process_method['catFill'][col] = 'zero'
drop_column.append(col)
if drop_column:
self.data.dropna(subset=drop_column, inplace=True)
def update_num_fill_dict(self):
self.num_fill_method_dict = {}
if 'numFill' in self.process_method.keys():
for f in cs.supported_method['fillNa']['numeric']:
self.num_fill_method_dict[f] = {}
for en in cs.supported_method['normalization']:
self.num_fill_method_dict[f][en] = []
for col in self.numeric_feature:
numFillDict = self.process_method.get('numFill',{})
normalizationDict = self.process_method.get('normalization',{})
if f == numFillDict.get(col, '') and en == normalizationDict.get(col,''):
self.num_fill_method_dict[f][en].append(col)
if not self.num_fill_method_dict[f][en] :
del self.num_fill_method_dict[f][en]
if not self.num_fill_method_dict[f]:
del self.num_fill_method_dict[f]
def update_cat_fill_dict(self):
self.cat_fill_method_dict = {}
if 'catFill' in self.process_method.keys():
for f in cs.supported_method['fillNa']['categorical']:
self.cat_fill_method_dict[f] = {}
for en in cs.supported_method['categoryEncoding']:
self.cat_fill_method_dict[f][en] = []
for col in self.cat_feature:
catFillDict = self.process_method.get('catFill',{})
catEncoderDict = self.process_method.get('catEncoder',{})
if f == catFillDict.get(col, '') and en == catEncoderDict.get(col,''):
self.cat_fill_method_dict[f][en].append(col)
if not self.cat_fill_method_dict[f][en] :
del self.cat_fill_method_dict[f][en]
if not self.cat_fill_method_dict[f]:
del self.cat_fill_method_dict[f]
def __update_type(self):
self.numeric_feature = list( set(self.data.select_dtypes(include='number').columns.tolist()) - set(self.keep_unprocessed))
self.cat_feature = list( set(self.data.select_dtypes(include='category').columns.tolist()) - set(self.keep_unprocessed))
self.text_feature = list( set(self.data.select_dtypes(include='object').columns.tolist()) - set(self.keep_unprocessed))
self.datetime_feature = list( set(self.data.select_dtypes(include='datetime').columns.tolist()) - set(self.keep_unprocessed))
def update_user_provided_type(self, data_types):
allowed_types = ['numerical','categorical', 'text']
skipped_types = ['date','index']
type_mapping = {'numerical': np.dtype('float'), 'float': np.dtype('float'),'categorical': 'category', 'text':np.dtype('object'),'date':'datetime64[ns]','index': np.dtype('int64'),}
mapped_type = {k:type_mapping[v] for k,v in data_types.items() if v in allowed_types}
skipped_features = [k for k,v in data_types.items() if v in skipped_types]
if skipped_features:
self.keep_unprocessed.extend( skipped_features)
self.keep_unprocessed = list(set(self.keep_unprocessed))
self.update_type(mapped_type, 'user provided data type')
def get_type(self, as_list=False):
if as_list:
return list(self.colm_type.values())
else:
return self.colm_type
def update_type(self, data_types={}, reason=''):
invalid_features = [x for x in data_types.keys() if x not in self.data.columns]
if invalid_features:
valid_feat = list(set(data_types.keys()) - set(invalid_features))
valid_feat_type = {k:v for k,v in data_types.items() if k in valid_feat}
else:
valid_feat_type = data_types
for k,v in valid_feat_type.items():
if v != self.colm_type[k].name:
try:
self.data.astype({k:v})
self.colm_type.update({k:self.data[k].dtype})
self.type_conversion[k] = (self.colm_type[k] , v, 'Done', reason)
except:
self.type_conversion[k] = (self.colm_type[k] , v, 'Fail', reason)
if v == np.dtype('float64') and self.colm_type[k].name == 'object':
if self.check_numeric( k):
self.data[ k] = pd.to_numeric(self.data[ k], errors='coerce')
self.type_conversion[k] = (self.colm_type[k] , v, 'Done', reason)
self.force_numeric_conv.append( k)
else:
raise ValueError(f"Can not convert '{k}' feature to 'numeric' as numeric values are less than {self.config['numericFeatureRatio'] * 100}%")
self.data = self.data.astype(valid_feat_type)
self.__update_type()
def check_numeric(self, feature):
col_values = self.data[feature].copy()
col_values = pd.to_numeric(col_values, errors='coerce')
if col_values.count() >= (self.config['numericFeatureRatio'] * len(col_values)):
return True
return False
def string_to_numeric(self):
def to_number(x):
try:
return w2n.word_to_num(x)
except:
return np.nan
for col in self.text_feature:
col_values = self.data[col].copy()
col_values = pd.to_numeric(col_values, errors='coerce')
if col_values.count() >= (self.config['numericFeatureRatio'] * len(col_values)):
self.text_to_num[col] = 'float64'
self.wordToNumericFeatures.append(col)
if self.text_to_num:
columns = list(self.text_to_num.keys())
self.data[columns] = self.data[columns].apply(lambda x: to_number(x), axis=1, result_type='broadcast')
self.update_type(self.text_to_num)
self.log.info('----------- Inspecting Features -----------')
for col in self.text_feature:
self.log.info(f'-------> Feature : {col}')
if col in self.text_to_num:
self.log.info('----------> Numeric Status :Yes')
self.log.info('----------> Data Type Converting to numeric :Yes')
else:
self.log.info('----------> Numeric Status :No')
self.log.info(f'\\nStatus:- |... Feature inspection done for numeric data: {len(self.text_to_num)} feature(s) converted to numeric')
self.log.info(f'\\nStatus:- |... Feature word to numeric treatment done: {self.text_to_num}')
self.log.info('----------- Inspecting Features End -----------')
def check_categorical_features(self):
num_data = self.data.select_dtypes(include='number')
num_data_unique = num_data.nunique()
num_to_cat_col = {}
for i, value in enumerate(num_data_unique):
if value < self.config['categoryMaxLabel']:
num_to_cat_col[num_data_unique.index[i]] = 'category'
if num_to_cat_col:
self.update_type(num_to_cat_col, 'numerical to categorical')
str_to_cat_col = {}
str_data = self.data.select_dtypes(include='object')
str_data_unique = str_data.nunique()
for i, value in enumerate(str_data_unique):
if value < self.config['categoryMaxLabel']:
str_to_cat_col[str_data_unique.index[i]] = 'category'
for colm in str_data.columns:
if self.data[colm].str.len().max() < cs.default_config['str_to_cat_len_max']:
str_to_cat_col[colm] = 'category'
if str_to_cat_col:
self.update_type(str_to_cat_col, 'text to categorical')
def drop_features(self, features=[], reason='unspecified'):
if isinstance(features, str):
features = [features]
feat_to_remove = [x for x in features if x in self.data.columns]
if feat_to_remove:
self.data.drop(feat_to_remove, axis=1, inplace=True)
for feat in feat_to_remove:
self.dropped_features[feat] = reason
self.log_drop_feature(feat_to_remove, reason)
self.__update_type()
def __update_index(self, indices, reason=''):
if isinstance(indices, pd.Series) and len(indices) == len(self.data):
if not indices.all():
self.data = self.data[indices]
if self.is_target_available():
self.target = self.target[indices]
self.log_update_index((~indices).sum(), reason)
def dropna(self):
self.data.dropna(how='all',inplace=True)
if self.is_target_available():
self.target = self.target[self.data.index]
def drop_duplicate(self):
index = self.data.duplicated(keep='first')
self.__update_index( ~index, reason='duplicate')
def log_drop_feature(self, columns, reason):
self.log.info(f'---------- Dropping {reason} features ----------')
self.log.info(f'\\nStatus:- |... {reason} feature treatment done: {len(columns)} {reason} feature(s) found')
self.log.info(f'-------> Drop Features: {columns}')
self.log.info(f'Data Frame Shape After Dropping (Rows,Columns): {self.data.shape}')
def log_update_index(self,count, reason):
if count:
if reason == 'target':
self.log.info('-------> Null Target Rows Drop:')
self.log.info(f'-------> Dropped rows count: {count}')
elif reason == 'duplicate':
self.log.info('-------> Duplicate Rows Drop:')
self.log.info(f'-------> Dropped rows count: {count}')
elif reason == 'outlier':
self.log.info(f'-------> Dropped rows count: {count}')
self.log.info('Status:- |... Outlier treatment done')
self.log.info(f'-------> Data Frame Shape After Dropping samples(Rows,Columns): {self.data.shape}')
def log_normalization(self):
if self.process_method.get('normalization', None):
self.log.info(f'\\nStatus:- !... Normalization treatment done')
for method in cs.supported_method['normalization']:
cols = []
for col, m in self.process_method['normalization'].items():
if m == method:
cols.append(col)
if cols and method != 'none':
self.log.info(f'Running {method} on features: {cols}')
def log_numerical_fill(self):
if self.process_method.get('numFill', None):
self.log.info(f'\\nStatus:- !... Fillna for numeric feature done')
for method in cs.supported_method['fillNa']['numeric']:
cols = []
for col, m in self.process_method['numFill'].items():
if m == method:
cols.append(col)
if cols:
self.log.info(f'-------> Running {method} on features: {cols}')
def log_categorical_fill(self):
if self.process_method.get('catFill', None):
self.log.info(f'\\nStatus:- !... FillNa for categorical feature done')
for method in cs.supported_method['fillNa']['categorical']:
cols = []
for col, m in self.process_method['catFill'].items():
if m == method:
cols.append(col)
if cols:
self.log.info(f'-------> Running {method} on features: {cols}')
def remove_constant_feature(self):
unique_values = self.data.nunique()
constant_features = []
for i, value in enumerate(unique_values):
if value == 1:
constant_features.append(unique_values.index[i])
if constant_features:
self.drop_features(constant_features, "constant")
def remove_empty_feature(self, misval_ratio=1.0):
missing_ratio = self.data.isnull().sum() / len(self.data)
missing_ratio = {k:v for k,v in zip(self.data.columns, missing_ratio)}
empty_features = [k for k,v in missing_ratio.items() if v > misval_ratio]
if empty_features:
self.drop_features(empty_features, "empty")
def remove_index_features(self):
index_feature = []
for feat in self.numeric_feature:
if self.data[feat].nunique() == len(self.data):
#if (self.data[feat].sum()- sum(self.data.index) == (self.data.iloc[0][feat]-self.data.index[0])*len(self.data)):
# index feature can be time based
count = (self.data[feat] - self.data[feat].shift() == 1).sum()
if len(self.data) - count == 1:
index_feature.append(feat)
self.drop_features(index_feature, "index")
def fill_missing_value_method(self, colm, method):
if colm in self.numeric_feature:
if method in cs.supported_method['fillNa']['numeric']:
if 'numFill' not in self.process_method.keys():
self.process_method['numFill'] = {}
if method == 'na' and self.process_method['numFill'].get(colm, None):
pass # don't overwrite
else:
self.process_method['numFill'][colm] = method
if colm in self.cat_feature:
if method in cs.supported_method['fillNa']['categorical']:
if 'catFill' not in self.process_method.keys():
self.process_method['catFill'] = {}
if method == 'na' and self.process_method['catFill'].get(colm, None):
pass
else:
self.process_method['catFill'][colm] = method
def check_encoding_method(self, method, colm,default=False):
if not self.is_target_available() and (method.lower() == list(cs.target_encoding_method_change.keys())[0]):
method = cs.target_encoding_method_change[method.lower()]
if default:
self.log.info(f"Applying Label encoding instead of Target encoding on feature '{colm}' as target feature is not present")
return method
def fill_encoder_value_method(self,colm, method, default=False):
if colm in self.cat_feature:
if method.lower() in cs.supported_method['categoryEncoding']:
if 'catEncoder' not in self.process_method.keys():
self.process_method['catEncoder'] = {}
if method == 'na' and self.process_method['catEncoder'].get(colm, None):
pass
else:
self.process_method['catEncoder'][colm] = self.check_encoding_method(method, colm,default)
else:
self.log.info(f"-------> categorical encoding method '{method}' is not supported. supported methods are {cs.supported_method['categoryEncoding']}")
def fill_normalizer_method(self,colm, method):
if colm in self.numeric_feature:
if method in cs.supported_method['normalization']:
if 'normalization' not in self.process_method.keys():
self.process_method['normalization'] = {}
if (method == 'na' or method == 'none') and self.process_method['normalization'].get(colm, None):
pass
else:
self.process_method['normalization'][colm] = method
else:
self.log.info(f"-------> Normalization method '{method}' is not supported. supported methods are {cs.supported_method['normalization']}")
def apply_outlier(self):
inlier_indice = np.array([True] * len(self.data))
if self.process_method.get('outlier', None):
self.log.info('-------> Feature wise outlier detection:')
for k,v in self.process_method['outlier'].items():
if k in self.numeric_feature:
if v == 'iqr':
index = cs.findiqrOutlier(self.data[k])
elif v == 'zscore':
index = cs.findzscoreOutlier(self.data[k])
elif v == 'disable':
index = None
if index is not None and k in self.process_method['outlierOperation'].keys():
if self.process_method['outlierOperation'][k] == 'dropdata':
inlier_indice = np.logical_and(inlier_indice, index)
elif self.process_method['outlierOperation'][k] == 'average':
mean = self.data[k].mean()
index = ~index
self.data.loc[index,[k]] = mean
self.log.info(f'-------> {k}: Replaced by Mean {mean}: total replacement {index.sum()}')
elif self.process_method['outlierOperation'][k] == 'nochange' and v != 'disable':
self.log.info(f'-------> Total outliers in "{k}": {(~index).sum()}')
if self.config.get('outlierDetection',None):
if self.config['outlierDetection'].get('IsolationForest','False') == 'True':
if self.numeric_feature:
index = cs.findiforestOutlier(self.data[self.numeric_feature])
inlier_indice = np.logical_and(inlier_indice, index)
self.log.info(f'-------> Numeric feature based Outlier detection(IsolationForest):')
if inlier_indice.sum() != len(self.data):
self.__update_index(inlier_indice, 'outlier')
def fill_outlier_method(self,colm, method):
if colm in self.numeric_feature:
if method in cs.supported_method['outlier_column_wise']:
if 'outlier' not in self.process_method.keys():
self.process_method['outlier'] = {}
if method not in ['Disable', 'na']:
self.process_method['outlier'][colm] = method
else:
self.log.info(f"-------> outlier detection method '{method}' is not supported for column wise. supported methods are {cs.supported_method['outlier_column_wise']}")
def fill_outlier_process(self,colm, method):
if colm in self.numeric_feature:
if method in cs.supported_method['outlierOperation']:
if 'outlierOperation' not in self.process_method.keys():
self.process_method['outlierOperation'] = {}
self.process_method['outlierOperation'][colm] = method
else:
self.log.info(f"-------> outlier process method '{method}' is not supported for column wise. supported methods are {cs.supported_method['outlierOperation']}")
def get_cat_imputer(self,method):
if method == 'mode':
return SimpleImputer(strategy='most_frequent')
elif method == 'zero':
return SimpleImputer(strategy='constant', fill_value=0)
def get_cat_encoder(self,method):
if method == 'labelencoding':
return OrdinalEncoder()
elif method == 'onehotencoding':
return OneHotEncoder(sparse=False,handle_unknown="ignore")
elif method == 'targetencoding':
if not self.is_target_available():
raise ValueError('Can not apply Target Encoding when target feature is not present')
return TargetEncoder()
def get_num_imputer(self,method):
if method == 'mode':
return SimpleImputer(strategy='most_frequent')
elif method == 'mean':
return SimpleImputer(strategy='mean')
elif method == 'median':
return SimpleImputer(strategy='median')
elif method == 'knnimputer':
return KNNImputer()
elif method == 'zero':
return SimpleImputer(strategy='constant', fill_value=0)
def get_num_scaler(self,method):
if method == 'minmax':
return MinMaxScaler()
elif method == 'standardscaler':
return StandardScaler()
elif method == 'lognormal':
return PowerTransformer(method='yeo-johnson', standardize=False)
def recommenderStartProfiler(self,modelFeatures):
return cs.recommenderStartProfiler(self,modelFeatures)
def folderPreprocessing(self,folderlocation,folderdetails,deployLocation):
return cs.folderPreprocessing(self,folderlocation,folderdetails,deployLocation)
def textSimilarityStartProfiler(self, doc_col_1, doc_col_2):
return cs.textSimilarityStartProfiler(self, doc_col_1, doc_col_2)
def get_conversion_method(self):
return cs.get_one_true_option(self.config.get('textConversionMethod','')).lower()
def set_features(features,profiler=None):
return cs.set_features(features,profiler)
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import random
from matplotlib import pyplot as plt
import cv2
import albumentations as A
import os
import pandas as pd
from pathlib import Path
class ImageAugmentation():
def __init__(self, dataLocation, csvFile):
self.AugmentationOptions = {"Flip": {"operation": A.HorizontalFlip, "suffix":"_flip"},
"Rotate": {"operation": A.Rotate, "suffix":"_rotate"},
"Shift": {"operation": A.RGBShift, "suffix":"_shift"},
"Crop": {"operation": [A.CenterCrop, A.RandomSizedBBoxSafeCrop], "suffix":"_crop"},
"Contrast": {"operation": A.RandomContrast, "suffix":"_cont"},
"Brightness": {"operation": A.RandomBrightness, "suffix":"_bright"},
"Blur": {"operation": A.GaussianBlur, "suffix":"_blur"}
}
self.dataLocation = dataLocation
self.csvFile = csvFile
def __applyAugmentationClass(self, image, augmentation,limit):
if augmentation in list(self.AugmentationOptions.keys()):
if augmentation == "Crop":
height, width, _ = image.shape
crop_percentage = random.uniform(0.6, 0.9)
transform = self.AugmentationOptions[augmentation]["operation"][0](height=int(height*crop_percentage), width=int(width*crop_percentage) )
elif augmentation == "Blur":
transform = self.AugmentationOptions[augmentation]["operation"](blur_limit = limit)
elif augmentation in ["Contrast","Brightness"]:
transform = self.AugmentationOptions[augmentation]["operation"](limit = limit)
else:
transform = self.AugmentationOptions[augmentation]["operation"]()
return transform(image=image)
def __applyAugmentation(self, image, augmentation,limit,bboxes=None, category_ids=None, seed=7):
transformOptions = []
if bboxes:
bbox_params = A.BboxParams(format='pascal_voc', label_fields=['category_ids'])
else:
bbox_params = None
if augmentation in list(self.AugmentationOptions.keys()):
if augmentation == "Crop":
height, width, _ = image.shape
crop_percentage = random.uniform(0.6, 0.9)
transformOptions.append(self.AugmentationOptions[augmentation]["operation"][1](height=int(height*crop_percentage), width=int(width*crop_percentage) ))
elif augmentation == "Blur":
transformOptions.append(self.AugmentationOptions[augmentation]["operation"](blur_limit = limit))
elif augmentation in ["Contrast","Brightness"]:
transformOptions.append(self.AugmentationOptions[augmentation]["operation"](limit = limit))
else:
transformOptions.append(self.AugmentationOptions[augmentation]["operation"]())
transform = A.Compose(
transformOptions,
bbox_params=bbox_params,
)
random.seed(seed)
return transform(image=image, bboxes=bboxes, category_ids=category_ids)
else:
return None
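# Illustrative sketch (hypothetical values): for a bounding-box aware transform
# the call above is equivalent to
# A.Compose([A.HorizontalFlip()],
#           bbox_params=A.BboxParams(format='pascal_voc', label_fields=['category_ids'])
#          )(image=image, bboxes=[[10, 20, 50, 80]], category_ids=[1])
# which returns a dict with the transformed 'image', 'bboxes' and 'category_ids'.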
def getBBox(self, df, imageLoc, category_name_to_id):
subDf = df[df['loc']==imageLoc]
boxes = []
category = []
for index, row in subDf.iterrows():
boxes.append( [row['xmin'],row['ymin'],row['xmax'],row['ymax']])
category.append(category_name_to_id[row['Label']])
return boxes, category
def __objAug(self, imageLoc, df, classes_names, category_id_to_name, category_name_to_id,limit,numberofImages,op):
for x in range(numberofImages):
bbox, category_ids = self.getBBox(df, imageLoc, category_name_to_id)
image = cv2.imread(imageLoc)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
transformed = self.__applyAugmentation(image, op,limit,bbox, category_ids)
transformed['image'] = cv2.cvtColor(transformed['image'], cv2.COLOR_RGB2BGR)
count = 1
row = df[df['loc']==imageLoc].iloc[0]
filename = (Path(imageLoc).stem +'_'+str(x)+ self.AugmentationOptions[op]["suffix"] + Path(imageLoc).suffix)
newImage = str(Path(imageLoc).parent/filename)
for index,bbox in enumerate(transformed['bboxes']):
data = {'File':filename, 'xmin':bbox[0],'ymin':bbox[1],'xmax':bbox[2],'ymax':bbox[3],'Label':category_id_to_name[transformed['category_ids'][index]],'id':count,'height':row['height'],'width':row['width'], 'angle':0.0, 'loc': newImage, 'AugmentedImage': True}
count += 1
df=df.append(data, ignore_index=True)
cv2.imwrite(newImage, transformed['image'])
return df
def __objectDetection(self, images, df, optionDf, classes_names, suffix='',augConf={}):
category_id_to_name = {v+1:k for v,k in enumerate(classes_names)}
category_name_to_id = {k:v+1 for v,k in enumerate(classes_names)}
for i, imageLoc in enumerate(images):
for key in optionDf.columns:
if optionDf.iloc[i][key]:
if key in augConf:
limit = eval(augConf[key].get('limit','0.2'))
numberofImages = int(augConf[key].get('noOfImages',1))
else:
limit = 0.2
numberofImages = 1
df = self.__objAug(imageLoc, df, classes_names, category_id_to_name,category_name_to_id,limit,numberofImages,op=key)
return df
def __augClassificationImage(self, imageLoc, df,limit,imageindex,op):
data = {}
image = cv2.imread(imageLoc)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
transformed = self.__applyAugmentationClass(image, op,limit)
transformed['image'] = cv2.cvtColor(transformed['image'], cv2.COLOR_RGB2BGR)
location = Path(imageLoc).parent
filename = (Path(imageLoc).stem +'_'+str(imageindex)+ self.AugmentationOptions[op]["suffix"] + Path(imageLoc).suffix)
cv2.imwrite(str(location/'AION'/'AugumentedImages'/filename), transformed['image'])
data['File'] = filename
data['Label'] = df[df['File']==Path(imageLoc).name]["Label"].iloc[0]
data['AugmentedImage'] = True
data['loc'] = str(location/filename)
return data
def __classification(self, images, df, optionDf,augConf,csv_file=None, outputDir=None):
for i, imageLoc in enumerate(images):
for key in optionDf.columns:
if optionDf.iloc[i][key]:
if key in augConf:
limit = eval(augConf[key].get('limit','0.2'))
numberofImages = int(augConf[key].get('noOfImages',1))
else:
limit = 0.2
numberofImages = 1
for x in range(numberofImages):
rows = self.__augClassificationImage(imageLoc, df,limit,x,op=key)
df=df.append(rows, ignore_index=True)
return df
def removeAugmentedImages(self, df):
removeDf = df[df['AugmentedImage'] == True]['loc'].unique().tolist()
#df[df['imageAugmentationOriginalImage'] != True][loocationField].apply(lambda x: Path(x).unlink())
for file in removeDf:
if file:
Path(file).unlink()
def augment(self, modelType="imageclassification",params=None,csvSavePath = None,augConf={}):
if isinstance(params, dict) and any(params.values()):
df = pd.read_csv(self.csvFile)
if not self.dataLocation.endswith('/'):
images = self.dataLocation+'/'
else:
images = self.dataLocation
if modelType == "imageclassification":
images = images + df['File']
else:
images = images + df['File']
df['loc'] = images
images = set(images.tolist())
option = {}
for key in list(self.AugmentationOptions.keys()):
option[key] = params.get(key, False)
optionDf = pd.DataFrame(columns=list(option.keys()))
for i in range(len(images)):
optionDf = optionDf.append(option, ignore_index=True)
if modelType == "imageclassification":
df = self.__classification(images, df, optionDf,augConf)
else:
classes_names = sorted(df['Label'].unique().tolist())
df = self.__objectDetection(images, df, optionDf, classes_names,'',augConf)
df.to_csv(self.csvFile, index=False)
return self.csvFile<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
#System imports
import logging
from distutils.util import strtobool
import pandas as pd
from text import TextProcessing
def get_one_true_option(d, default_value):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
class textProfiler():
def __init__(self):
self.log = logging.getLogger('eion')
def textCleaning(self, textCorpus):
textProcessor = TextProcessing.TextProcessing()
textCorpus = textProcessor.transform(textCorpus)
return(textCorpus)
def textProfiler(self, textCorpus, conf_json, pipeList, max_features):
cleaning_kwargs = {}
textCleaning = conf_json.get('textCleaning')
self.log.info("Text Preprocessing config: ",textCleaning)
cleaning_kwargs['fRemoveNoise'] = strtobool(textCleaning.get('removeNoise', 'True'))
cleaning_kwargs['fNormalize'] = strtobool(textCleaning.get('normalize', 'True'))
cleaning_kwargs['fReplaceAcronym'] = strtobool(textCleaning.get('replaceAcronym', 'False'))
cleaning_kwargs['fCorrectSpelling'] = strtobool(textCleaning.get('correctSpelling', 'False'))
cleaning_kwargs['fRemoveStopwords'] = strtobool(textCleaning.get('removeStopwords', 'True'))
cleaning_kwargs['fRemovePunctuation'] = strtobool(textCleaning.get('removePunctuation', 'True'))
cleaning_kwargs['fRemoveNumericTokens'] = strtobool(textCleaning.get('removeNumericTokens', 'True'))
cleaning_kwargs['normalizationMethod'] = get_one_true_option(textCleaning.get('normalizeMethod'),
'lemmatization').capitalize()
removeNoiseConfig = textCleaning.get('removeNoiseConfig')
if type(removeNoiseConfig) is dict:
cleaning_kwargs['removeNoise_fHtmlDecode'] = strtobool(removeNoiseConfig.get('decodeHTML', 'True'))
cleaning_kwargs['removeNoise_fRemoveHyperLinks'] = strtobool(removeNoiseConfig.get('removeHyperLinks', 'True'))
cleaning_kwargs['removeNoise_fRemoveMentions'] = strtobool(removeNoiseConfig.get('removeMentions', 'True'))
cleaning_kwargs['removeNoise_fRemoveHashtags'] = strtobool(removeNoiseConfig.get('removeHashtags', 'True'))
cleaning_kwargs['removeNoise_RemoveOrReplaceEmoji'] = 'remove' if strtobool(removeNoiseConfig.get('removeEmoji', 'True')) else 'replace'
cleaning_kwargs['removeNoise_fUnicodeToAscii'] = strtobool(removeNoiseConfig.get('unicodeToAscii', 'True'))
cleaning_kwargs['removeNoise_fRemoveNonAscii'] = strtobool(removeNoiseConfig.get('removeNonAscii', 'True'))
acronymConfig = textCleaning.get('acronymConfig')
if type(acronymConfig) is dict:
cleaning_kwargs['acronymDict'] = acronymConfig.get('acronymDict', None)
stopWordsConfig = textCleaning.get('stopWordsConfig')
if type(stopWordsConfig) is dict:
cleaning_kwargs['stopwordsList'] = stopWordsConfig.get('stopwordsList', [])
cleaning_kwargs['extend_or_replace_stopwordslist'] = 'extend' if strtobool(stopWordsConfig.get('extend', 'True')) else 'replace'
removeNumericConfig = textCleaning.get('removeNumericConfig')
if type(removeNumericConfig) is dict:
cleaning_kwargs['removeNumeric_fIncludeSpecialCharacters'] = strtobool(removeNumericConfig.get('removeNumeric_IncludeSpecialCharacters', 'True'))
removePunctuationConfig = textCleaning.get('removePunctuationConfig')
if type(removePunctuationConfig) is dict:
cleaning_kwargs['fRemovePuncWithinTokens'] = strtobool(removePunctuationConfig.get('removePuncWithinTokens', 'False'))
cleaning_kwargs['fExpandContractions'] = strtobool(textCleaning.get('expandContractions', 'False'))
if cleaning_kwargs['fExpandContractions']:
cleaning_kwargs['expandContractions_googleNewsWordVectorPath'] = GOOGLE_NEWS_WORD_VECTORS_PATH
libConfig = textCleaning.get('libConfig')
if type(libConfig) is dict:
cleaning_kwargs['tokenizationLib'] = get_one_true_option(libConfig.get('tokenizationLib'), 'nltk')
cleaning_kwargs['lemmatizationLib'] = get_one_true_option(libConfig.get('lemmatizationLib'), 'nltk')
cleaning_kwargs['stopwordsRemovalLib'] = get_one_true_option(libConfig.get('stopwordsRemovalLib'), 'nltk')
textProcessor = TextProcessing.TextProcessing(**cleaning_kwargs)
textCorpus = textProcessor.transform(textCorpus)
pipeList.append(("TextProcessing",textProcessor))
textFeatureExtraction = conf_json.get('textFeatureExtraction')
if strtobool(textFeatureExtraction.get('pos_tags', 'False')):
pos_tags_lib = get_one_true_option(textFeatureExtraction.get('pos_tags_lib'), 'nltk')
posTagger = TextProcessing.PosTagging( pos_tags_lib)
textCorpus = posTagger.transform(textCorpus)
pipeList.append(("posTagger",posTagger))
ngram_min = 1
ngram_max = 1
if strtobool(textFeatureExtraction.get('n_grams', 'False')):
n_grams_config = textFeatureExtraction.get("n_grams_config")
ngram_min = int(n_grams_config.get('min_n', 1))
ngram_max = int(n_grams_config.get('max_n', 1))
if (ngram_min < 1) or ngram_min > ngram_max:
ngram_min = 1
ngram_max = 1
invalidNgramWarning = 'WARNING : invalid ngram config.\\nUsing the default values min_n={}, max_n={}'.format(ngram_min, ngram_max)
self.log.info(invalidNgramWarning)
ngram_range_tuple = (ngram_min, ngram_max)
textConversionMethod = conf_json.get('textConversionMethod')
conversion_method = get_one_true_option(textConversionMethod, '')
if conversion_method.lower() == "countvectors":
X, vectorizer = TextProcessing.ExtractFeatureCountVectors(textCorpus, ngram_range=ngram_range_tuple, max_features=max_features)
pipeList.append(("vectorizer",vectorizer))
df1 = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names())
df1 = df1.add_suffix('_vect')
self.log.info('----------> Conversion Method: CountVectors')
elif conversion_method.lower() in ["word2vec","fasttext","glove"]:
embedding_method = conversion_method
wordEmbeddingVecotrizer = TextProcessing.wordEmbedding(embedding_method)
wordEmbeddingVecotrizer.checkAndDownloadPretrainedModel()
X = wordEmbeddingVecotrizer.transform(textCorpus)
df1 = pd.DataFrame(X)
df1 = df1.add_suffix('_vect')
pipeList.append(("vectorizer",wordEmbeddingVecotrizer))
self.log.info('----------> Conversion Method: '+str(conversion_method))
elif conversion_method.lower() == "sentencetransformer":
from sentence_transformers import SentenceTransformer
model = SentenceTransformer('sentence-transformers/msmarco-distilroberta-base-v2')
X = model.encode(textCorpus)
df1 = pd.DataFrame(X)
df1 = df1.add_suffix('_vect')
pipeList.append(("vectorizer",model))
self.log.info('----------> Conversion Method: SentenceTransformer')
elif conversion_method.lower() == 'tf_idf':
X, vectorizer = TextProcessing.ExtractFeatureTfIdfVectors(textCorpus,ngram_range=ngram_range_tuple, max_features=max_features)
pipeList.append(("vectorizer",vectorizer))
df1 = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names())
df1 = df1.add_suffix('_vect')
self.log.info('----------> Conversion Method: TF_IDF')
else:
df1 = pd.DataFrame()
df1['tokenize'] = textCorpus
self.log.info('----------> Conversion Method: NA')
return df1, pipeList,conversion_method
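# Illustrative note: the conversion method is selected by whichever key of the
# 'textConversionMethod' config is true; e.g. a hypothetical config
# {'countvectors': 'False', 'tf_idf': 'True'} routes through the TF-IDF branch,
# and the returned df1 columns carry the '_vect' suffix.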
<s> import os
import sys
import numpy as np
import scipy
import pandas as pd
from pathlib import Path
default_config = {
'misValueRatio': '1.0',
'numericFeatureRatio': '1.0',
'categoryMaxLabel': '20',
'str_to_cat_len_max': 10
}
target_encoding_method_change = {'targetencoding': 'labelencoding'}
supported_method = {
'fillNa':
{
'categorical' : ['mode','zero','na'],
'numeric' : ['median','mean','knnimputer','zero','drop','na'],
},
'categoryEncoding': ['labelencoding','targetencoding','onehotencoding','na','none'],
'normalization': ['standardscaler','minmax','lognormal', 'na','none'],
'outlier_column_wise': ['iqr','zscore', 'disable', 'na'],
'outlierOperation': ['dropdata', 'average', 'nochange']
}
def findiqrOutlier(df):
Q1 = df.quantile(0.25)
Q3 = df.quantile(0.75)
IQR = Q3 - Q1
index = ~((df < (Q1 - 1.5 * IQR)) | (df > (Q3 + 1.5 * IQR)))
return index
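# Illustrative check (not part of the original module): a minimal, hypothetical
# sanity test of the IQR mask above, using only the pandas import already present.
def _demo_findiqrOutlier():
    s = pd.Series([10, 11, 12, 11, 10, 100])  # 100 is an obvious outlier
    mask = findiqrOutlier(s)
    # mask is True for inliers; the extreme value is flagged False
    assert mask.tolist() == [True, True, True, True, True, False]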
def findzscoreOutlier(df):
z = np.abs(scipy.stats.zscore(df))
index = (z < 3)
return index
def findiforestOutlier(df):
from sklearn.ensemble import IsolationForest
isolation_forest = IsolationForest(n_estimators=100)
isolation_forest.fit(df)
y_pred_train = isolation_forest.predict(df)
return y_pred_train == 1
def get_one_true_option(d, default_value=None):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
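# Illustrative usage (hypothetical values, not part of the original module): the
# first key whose value is the string 'true' (case-insensitive) or boolean True
# wins; otherwise the default is returned.
def _demo_get_one_true_option():
    assert get_one_true_option({'minmax': 'False', 'standardscaler': 'True'}) == 'standardscaler'
    assert get_one_true_option({'minmax': 'False'}, 'none') == 'none'
    assert get_one_true_option('not-a-dict', 'none') == 'none'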
def get_boolean(value):
if (isinstance(value, str) and value.lower() == 'true') or (isinstance(value, bool) and value == True):
return True
else:
return False
def recommenderStartProfiler(self,modelFeatures):
try:
self.log.info('----------> FillNA:0')
self.data = self.data.fillna(value=0)
self.log.info('Status:- !... Missing value treatment done')
self.log.info('----------> Remove Empty Row')
self.data = self.data.dropna(axis=0,how='all')
self.log.info('Status:- !... Empty feature treatment done')
userId,itemId,rating = modelFeatures.split(',')
self.data[itemId] = self.data[itemId].astype(np.int32)
self.data[userId] = self.data[userId].astype(np.int32)
self.data[rating] = self.data[rating].astype(np.float32)
return self.data
except Exception as inst:
self.log.info("Error: dataProfiler failed "+str(inst))
return(self.data)
def folderPreprocessing(self,folderlocation,folderdetails,deployLocation):
try:
dataset_directory = Path(folderlocation)
dataset_csv_file = dataset_directory/folderdetails['label_csv_file_name']
tfrecord_directory = Path(deployLocation)/'Video_TFRecord'
from savp import PreprocessSAVP
import csv
csvfile = open(dataset_csv_file, newline='')
csv_reader = csv.DictReader(csvfile)
PreprocessSAVP(dataset_directory,csv_reader,tfrecord_directory)
dataColumns = list(self.data.columns)
VideoProcessing = True
return dataColumns,VideoProcessing,tfrecord_directory
except Exception as inst:
self.log.info("Error: dataProfiler failed "+str(inst))
def textSimilarityStartProfiler(self, doc_col_1, doc_col_2):
import os
try:
features = [doc_col_1, doc_col_2]
pipe = None
dataColumns = list(self.data.columns)
self.numofCols = self.data.shape[1]
self.numOfRows = self.data.shape[0]
from transformations.textProfiler import textProfiler
self.log.info('-------> Execute Fill NA With Empty String')
self.data = self.data.fillna(value=" ")
self.log.info('Status:- |... Missing value treatment done')
self.data[doc_col_1] = textProfiler().textCleaning(self.data[doc_col_1])
self.data[doc_col_2] = textProfiler().textCleaning(self.data[doc_col_2])
self.log.info('-------> Concatenate: ' + doc_col_1 + ' ' + doc_col_2)
self.data['text'] = self.data[[doc_col_1, doc_col_2]].apply(lambda row: ' '.join(row.values.astype(str)), axis=1)
from tensorflow.keras.preprocessing.text import Tokenizer
pipe = Tokenizer()
pipe.fit_on_texts(self.data['text'].values)
self.log.info('-------> Tokenizer: Fit on Concatenate Field')
self.log.info('Status:- |... Tokenizer the text')
self.data[doc_col_1] = self.data[doc_col_1].astype(str)
self.data[doc_col_2] = self.data[doc_col_2].astype(str)
return (self.data, pipe, self.target_name, features)
except Exception as inst:
self.log.info("StartProfiler failed " + str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
def set_features(features,profiler=None):
if profiler:
features = [x for x in features if x not in profiler.added_features]
return features + profiler.text_feature
return features<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import sys
from pathlib import Path
import urllib.request
import tarfile
import json
import subprocess
import os
from os.path import expanduser
import platform
class ODpretrainedModels():
def __init__(self, location=None):
if location:
if isinstance(location, Path):
self.pretrained_models_location = location.as_posix()
else:
self.pretrained_models_location = location
else:
p = subprocess.run([sys.executable, "-m", "pip","show","AION"],capture_output=True, text=True)
if p.returncode == 0:
Output = p.stdout.split('\\n')
for x in Output:
y = x.split(':',1)
if(y[0]=='Location'):
self.pretrained_models_location = y[1].strip()+"/AION/pretrained_models/object_detection"
break
if Path(self.pretrained_models_location).is_dir():
self.config_file_location = self.pretrained_models_location+'/supported_models.json'
with open(self.config_file_location) as json_data:
self.supportedModels = json.load(json_data)
home = expanduser("~")
if platform.system() == 'Windows':
self.modelsPath = os.path.join(home,'AppData','Local','HCLT','AION','PreTrainedModels','ObjectDetection')
else:
self.modelsPath = os.path.join(home,'HCLT','AION','PreTrainedModels','ObjectDetection')
if os.path.isdir(self.modelsPath) == False:
os.makedirs(self.modelsPath)
def __save_config(self):
with open(self.config_file_location, 'w') as json_file:
json.dump(self.supportedModels, json_file)
def __download(self, modelName):
try:
url = self.supportedModels[modelName]["url"]
file = self.supportedModels[modelName]["file"]
local_file_path = Path(self.modelsPath)/(file+".tar.gz")
urllib.request.urlretrieve(url, local_file_path)
except:
raise ValueError("{} model download error, check your internet connection".format(modelName))
return local_file_path
def __extract(self, modelName, file_location, extract_dir):
try:
tarFile = tarfile.open(file_location)
tarFile.extractall(extract_dir)
tarFile.close()
Path.unlink(file_location)
return True
except:
return False
def download(self, modelName):
if modelName in list(self.supportedModels.keys()):
p = Path(self.modelsPath).glob('**/*')
modelsDownloaded = [x.name for x in p if x.is_dir()]
if self.supportedModels[modelName]['file'] not in modelsDownloaded:
file = self.__download(modelName)
self.supportedModels[modelName]["downloaded"] = True
if self.__extract(modelName, file, self.modelsPath):
self.supportedModels[modelName]["extracted"] = True
self.__save_config()
else:
self.__save_config()
raise ValueError("{} model downloaded but extraction failed,please try again".format(modelName))
else:
raise ValueError("{} is not supported for object detection".format(modelName))
return self.supportedModels[modelName]
def get_info(self,modeltype):
models_info = {}
p = Path(self.pretrained_models_location)
downloaded_models = [x.name for x in p.iterdir() if x.is_dir()]
for model in list(self.supportedModels.keys()):
if (self.supportedModels[model]['type'] == modeltype) or (modeltype == ''):
models_info[model] = self.supportedModels[model]['extracted']
return models_info
def is_model_exist(self, model_name):
models = self.get_info('')
status = "NOT_SUPPORTED"
if model_name in models:
if self.supportedModels[model_name]['extracted']:
status = "READY"
else:
status = "NOT_READY"
return status
def clear_config(self, model_name):
self.supportedModels[model_name]['extracted'] = False
self.supportedModels[model_name]['downloaded'] = False
self.__save_config()
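# Illustrative usage (the model name is an assumption; valid names come from
# supported_models.json):
# od = ODpretrainedModels()
# if od.is_model_exist('ssd_mobilenet_v2') != 'READY':
# od.download('ssd_mobilenet_v2')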
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import numpy as np
import os
import sys
import string
import spacy
#import en_core_web_sm
from spacy.lang.en.stop_words import STOP_WORDS
from spacy.lang.en import English
try:
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
except:
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
from sklearn.base import TransformerMixin
from nltk.stem import WordNetLemmatizer
import re
from collections import defaultdict
from nltk.corpus import wordnet as wn
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import LabelBinarizer
from nltk.tokenize import word_tokenize
from nltk import pos_tag
from nltk.corpus import stopwords
class textDataProfiler():
def __init__(self):
self.data=None
#self.nlp=en_core_web_sm.load()
self.punctuations = string.punctuation
self.stopwords = list(STOP_WORDS)
def startTextProfiler(self,df,target):
try:
dataColumns = list(df.columns)
print(' \\n No of rows and columns in dataFrame',df.shape)
print('\\n features in dataFrame',dataColumns)
dataFDtypes=self.dataFramecolType(df)
print('\\n feature types in dataFrame',dataFDtypes)
trainX=df['text']
trainY=df[target]
return trainX,trainY
except Exception as inst:
print('startTextProfiler code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
def dataFramecolType(self,dataFrame):
dataFDtypes=[]
try:
dataColumns=list(dataFrame.columns)
for i in dataColumns:
dataType=dataFrame[i].dtypes
dataFDtypes.append(tuple([i,str(dataType)]))
return dataFDtypes
except Exception as e:
print("error in dataFramecolyType",e)
return dataFDtypes
def textTokenizer(self,text):
try:
parser = English()
tokens = parser(text)
tokens = [ word.lemma_.lower().strip() if word.lemma_ != "-PRON-" else word.lower_ for word in tokens ]
tokens = [ word for word in tokens if word not in self.stopwords and word not in self.punctuations ]
return tokens
except Exception as inst:
print('textDataProfiler code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
return {}
def cleanText(self,text):
try:
text=str(text).strip().lower()
for punctuation in string.punctuation:
text = text.replace(punctuation, '')
return text
except Exception as inst:
print('cleanText code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
def textTokenization(self,text):
try:
tokenizedText=word_tokenize(text)
return tokenizedText
except Exception as inst:
print('textDataProfiler code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
return {}
def textLemmitizer(self,text):
try:
tag_map = defaultdict(lambda : wn.NOUN)
tag_map['J'] = wn.ADJ
tag_map['V'] = wn.VERB
tag_map['R'] = wn.ADV
Final_words = []
word_Lemmatized = WordNetLemmatizer()
for word, tag in pos_tag(text):
if word not in stopwords.words('english') and word.isalpha():
word_Final = word_Lemmatized.lemmatize(word,tag_map[tag[0]])
Final_words.append(word_Final)
return str(Final_words)
except Exception as inst:
print('textLemmitizer code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
return {}
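# Illustrative usage (input must be a token list, e.g. from textTokenization;
# the sample sentence is an assumption):
# tokens = self.textTokenization('the cats are running')
# lemmas = self.textLemmitizer(tokens) # -> "['cat', 'run']" (string form)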
class TextCleaner(TransformerMixin):
def clean_text(self,text):
try:
text=str(text).strip().lower()
text = text.replace("isn't", "is not")
text = text.replace("aren't", "are not")
text = text.replace("ain't", "am not")
text = text.replace("won't", "will not")
text = text.replace("didn't", "did not")
text = text.replace("shan't", "shall not")
text = text.replace("haven't", "have not")
text = text.replace("hadn't", "had not")
text = text.replace("hasn't", "has not")
text = text.replace("don't", "do not")
text = text.replace("wasn't", "was not")
text = text.replace("weren't", "were not")
text = text.replace("doesn't", "does not")
text = text.replace("'s", " is")
text = text.replace("'re", " are")
text = text.replace("'m", " am")
text = text.replace("'d", " would")
text = text.replace("'ll", " will")
text = re.sub(r'^https?:\\/\\/.*[\\r\\n]*', ' ', text, flags=re.MULTILINE)
text = re.sub(r'[\\w\\.-]+@[\\w\\.-]+', ' ', text, flags=re.MULTILINE)
for punctuation in string.punctuation:
text = text.replace(punctuation,' ')
text = re.sub(r'[^A-Za-z0-9\\s]',r' ',text)
text = re.sub(r'\\n',r' ',text)
text = re.sub(r'[0-9]',r' ',text)
wordnet_lemmatizer = WordNetLemmatizer()
text = " ".join([wordnet_lemmatizer.lemmatize(w, pos='v') for w in text.split()])
return text
except Exception as inst:
print('TextCleaner clean_text code execution failed !....',inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(exc_type, fname, exc_tb.tb_lineno)
def text_cleaner(self,text):
text = self.clean_text(text)
stop_words = set(stopwords.words('english'))
text_tokens = word_tokenize(text)
out=' '.join(str(j) for j in text_tokens if j not in stop_words and (len(j)!=1))
return(out)
def transform(self, X, **transform_params):
# Cleaning Text
return [self.clean_text(text) for text in X]
def fit(self, X, y=None, **fit_params):
return self
def get_params(self, deep=True):
return {}<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import glob
import pandas as pd
import io
import xml.etree.ElementTree as ET
import argparse
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging (1)
import tensorflow as tf
from PIL import Image
from object_detection.utils import dataset_util, label_map_util
from collections import namedtuple
from pathlib import Path
def class_text_to_int(row_label, label_map_dict):
return label_map_dict[row_label]
def split(df, group):
data = namedtuple('data', ['File', 'object'])
gb = df.groupby(group)
return [data(File, gb.get_group(x)) for File, x in zip(gb.groups.keys(), gb.groups)]
def create_tf_example(group, path, label_map_dict):
with tf.io.gfile.GFile(os.path.join(path, '{}'.format(group.File)), 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = Image.open(encoded_jpg_io)
width, height = image.size
File = group.File.encode('utf8')
image_format = b'jpg'
xmins = []
xmaxs = []
ymins = []
ymaxs = []
classes_text = []
classes = []
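# Clamp each annotation box to the image bounds, then normalize the
# coordinates to [0, 1] as expected by the TF Object Detection API.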
for index, row in group.object.iterrows():
xmin_n = min(row['xmin'], row['xmax'])
xmax_n = max(row['xmin'], row['xmax'])
ymin_n = min(row['ymin'], row['ymax'])
ymax_n = max(row['ymin'], row['ymax'])
xmin_new = min(xmin_n, width)
xmax_new = min(xmax_n, width)
ymin_new = min(ymin_n, height)
ymax_new = min(ymax_n, height)
xmn = xmin_new / width
xmins.append(xmn)
xmx = xmax_new / width
xmaxs.append(xmx)
ymn = ymin_new / height
ymins.append(ymn)
ymx = ymax_new / height
ymaxs.append(ymx)
classes_text.append(row['Label'].encode('utf8'))
classes.append(class_text_to_int(row['Label'], label_map_dict))
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(File),
'image/source_id': dataset_util.bytes_feature(File),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature(image_format),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
}))
return tf_example
def labelFile(classes_names, label_map_path):
pbtxt_content = ""
for i, class_name in enumerate(classes_names):
pbtxt_content = (
pbtxt_content
+ "item {{\\n id: {0}\\n name: '{1}'\\n}}\\n\\n".format(i + 1, class_name)
)
pbtxt_content = pbtxt_content.strip()
with open(label_map_path, "w") as f:
f.write(pbtxt_content)
def createLabelFile(train_df, save_path):
labelmap_path = str(Path(save_path)/ 'label_map.pbtxt')
classes_names = sorted(train_df['Label'].unique().tolist())
labelFile(classes_names, labelmap_path)
return labelmap_path, len(classes_names)
def generate_TF_record(image_dir, output_dir, train_df, test_df, labelmap_path):
outputPath = str(Path(output_dir)/ 'train.tfrecord')
writer = tf.io.TFRecordWriter( outputPath)
grouped = split(train_df, 'File')
label_map = label_map_util.load_labelmap(labelmap_path )
label_map_dict = label_map_util.get_label_map_dict(label_map)
for group in grouped:
tf_example = create_tf_example(group, image_dir, label_map_dict)
writer.write(tf_example.SerializeToString())
writer.close()
if len(test_df):
outputPath = str(Path(output_dir)/ 'test.tfrecord')
writer = tf.io.TFRecordWriter( outputPath)
grouped = split(test_df, 'File')
for group in grouped:
tf_example = create_tf_example(group, image_dir, label_map_dict)
writer.write(tf_example.SerializeToString())
writer.close()
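# Illustrative end-to-end usage (paths and dataframes are assumptions):
# labelmap_path, n_classes = createLabelFile(train_df, 'deploy')
# generate_TF_record('images/', 'deploy', train_df, test_df, labelmap_path)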
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
#from sklearn.externals import joblib
import joblib
# import pyreadstat
# import sys
# import math
import time
import pandas as pd
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
from sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.svm import SVC
from sklearn.linear_model import LinearRegression
import argparse
import json
import os
import pathlib
from tensorflow.keras.models import load_model
# from tensorflow.keras import backend as K
import tensorflow as tf
# from sklearn.decomposition import LatentDirichletAllocation
from pathlib import Path
#from aionUQ import aionUQ
from uq_main import aionUQ
import os
from datetime import datetime
from sklearn.model_selection import train_test_split
parser = argparse.ArgumentParser()
parser.add_argument('savFile')
parser.add_argument('csvFile')
parser.add_argument('features')
parser.add_argument('target')
args = parser.parse_args()
from appbe.dataPath import DEPLOY_LOCATION
if ',' in args.features:
args.features = [x.strip() for x in args.features.split(',')]
else:
args.features = args.features.split(",")
models = args.savFile
if Path(models).is_file():
# if Path(args.savFile.is_file()):
model = joblib.load(args.savFile)
# print(model.__class__.__name__)
# print('class:',model.__class__)
# print(type(model).__name__)
# try:
# print('Classess=',model.classes_)
# except:
# print("Classess=N/A")
# print('params:',model.get_params())
# try:
# print('fea_imp =',model.feature_importances_)
# except:
# print("fea_imp =N/A")
ProblemName = model.__class__.__name__
Params = model.get_params()
# print("ProblemName: \\n",ProblemName)
# print("Params: \\n",Params)
# print('ProblemName:',model.__doc__)
# print(type(ProblemName))
if ProblemName in ['LogisticRegression','SGDClassifier','SVC','RandomForestClassifier','GaussianNB','KNeighborsClassifier','DecisionTreeClassifier','GradientBoostingClassifier']:
Problemtype = 'Classification'
else :
Problemtype = 'Regression'
if Problemtype == 'Classification':
df = pd.read_csv(args.csvFile)
object_cols = [col for col, col_type in df.dtypes.items() if col_type == 'object']
df = df.drop(object_cols, axis=1)
df = df.dropna(axis=1)
df = df.reset_index(drop=True)
modelfeatures = args.features
# dfp = df[modelfeatures]
tar = args.target
# target = df[tar]
y=df[tar]
X = df.drop(tar, axis=1)
#for dummy test,train values pass
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
uqObj=aionUQ(df,X,y,ProblemName,Params,model,modelfeatures,tar)
#accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertaitny_per=uqObj.uqMain_BBMClassification(X_train, X_test, y_train, y_test,"uqtest")
accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertaitny_per=uqObj.uqMain_BBMClassification()
# print("UQ Classification: \\n",output_jsonobject)
print(accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertaitny_per)
print("End of UQ Classification.\\n")
else:
df = pd.read_csv(args.csvFile)
modelfeatures = args.features
# print("modelfeatures: \\n",modelfeatures)
# print("type modelfeatures: \\n",type(modelfeatures))
dfp = df[modelfeatures]
tar = args.target
target = df[tar]
#Not used, just dummy X,y split
y=df[tar]
X = df.drop(tar, axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
uqObj=aionUQ(df,dfp,target,ProblemName,Params,model,modelfeatures,tar)
total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression()
print(total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject)
print("End of UQ reg\\n")
elif Path(models).is_dir():
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
model = load_model(models)
ProblemName = model.__class__.__name__
Problemtype = 'Classification'
# print('class:',model.__class__)
# print('class1',model.__class__.__name__)
# print(model.summary())
# print('ProblemName1:',model.get_config())
def Params(model: tf.keras.Model):
# Collect the layer-by-layer summary that model.summary() would print.
lines = []
model.summary(print_fn=lambda x: lines.append(x))
return '\\n'.join(lines)
df = pd.read_csv(args.csvFile)
modelfeatures = args.features
dfp = df[modelfeatures]
tar = args.target
target = df[tar]
df3 = dfp.astype(np.float32)
predic = model.predict(df3)
if predic.shape[-1] > 1:
predic = np.argmax(predic, axis=-1)
else:
predic = (predic > 0.5).astype("int32")
matrixconfusion = pd.DataFrame(confusion_matrix(predic,target))
matrixconfusion = matrixconfusion.to_json(orient='index')
classificationreport = pd.DataFrame(classification_report(target,predic,output_dict=True)).transpose()
classificationreport = round(classificationreport,2)
classificationreport = classificationreport.to_json(orient='index')
output = {}
output["Precision"] = "%.3f" % precision_score(target, predic,average='weighted')
output["Recall"] = "%.3f" % recall_score(target, predic,average='weighted')
output["Accuracy"] = "%.3f" % accuracy_score(target, predic)
output["ProblemName"] = ProblemName
output["Params"] = Params
output["Problemtype"] = Problemtype
output["Confusionmatrix"] = matrixconfusion
output["classificationreport"] = classificationreport
print(json.dumps(output))
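# Illustrative invocation of this UQ test script (the script and file names
# are assumptions; positional args are savFile, csvFile, features, target):
# python uq_test.py model.sav data.csv "feature1,feature2" target_column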
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import logging
logging.getLogger('tensorflow').disabled = True
import json
#from nltk.corpus import stopwords
from collections import Counter
from matplotlib import pyplot
import sys
import os
import json
import matplotlib.pyplot as plt
from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression
from uq360.algorithms.ucc_recalibration import UCCRecalibration
from sklearn import datasets
from sklearn.model_selection import train_test_split
import pandas as pd
from uq360.metrics.regression_metrics import compute_regression_metrics
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_curve
# from math import sqrt
from sklearn.metrics import r2_score,mean_squared_error, explained_variance_score,mean_absolute_error
# from uq360.metrics import picp, mpiw, compute_regression_metrics, plot_uncertainty_distribution, plot_uncertainty_by_feature, plot_picp_by_feature
from uq360.metrics import plot_uncertainty_by_feature, plot_picp_by_feature
#Added libs from MLTest
import sys
import time
from sklearn.metrics import confusion_matrix
from pathlib import Path
import logging
# import json
class aionUQ:
# def __init__(self,uqdf,targetFeature,xtrain,ytrain,xtest,ytest,uqconfig_base,uqconfig_meta,deployLocation,saved_model):
def __init__(self,df,dfp,target,ProblemName,Params,model,modelfeatures,targetfeature,deployLocation):
# #printprint("Inside aionUQ \\n")
try:
#print("Inside aionUQ init\\n ")
self.data=df
self.dfFeatures=dfp
self.uqconfig_base=Params
self.uqconfig_meta=Params
self.targetFeature=targetfeature
self.target=target
self.selectedfeature=modelfeatures
self.y=self.target
self.X=self.dfFeatures
self.log = logging.getLogger('eion')
self.basemodel=model
self.model_name=ProblemName
self.Deployment = os.path.join(deployLocation,'log','UQ')
os.makedirs(self.Deployment,exist_ok=True)
self.uqgraphlocation = os.path.join(self.Deployment,'UQgraph')
os.makedirs(self.uqgraphlocation,exist_ok=True)
except Exception as e:
self.log.info('<!------------- UQ model INIT Error ---------------> '+str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def totalUncertainty(self,df,basemodel,model_params,xtrain, xtest, ytrain, ytest,aionstatus):
from sklearn.model_selection import train_test_split
# To get each class values and uncertainty
if (aionstatus.lower() == 'aionuq'):
X_train, X_test, y_train, y_test = xtrain, xtest, ytrain, ytest
# y_val = y_train.append(y_test)
else:
# y_val = self.y
df=self.data
y=df[self.targetFeature]
X = df.drop(self.targetFeature, axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
key = 'criterion'
#if key in model_params:
try:
#if model_params.has_key(key):
if key in model_params:
if (model_params['criterion']):
uq_scoring_param=model_params.get('criterion')
elif(model_params['criterion'] == None):
uq_scoring_param='picp'
else:
uq_scoring_param='picp'
else:
uq_scoring_param='picp'
pass
except Exception as inst:
uq_scoring_param='picp'
# from sklearn.tree import DecisionTreeRegressor
# from sklearn.linear_model import LinearRegression,Lasso,Ridge
# from sklearn import linear_model
# from sklearn.ensemble import RandomForestRegressor
if uq_scoring_param in ['rmse', 'nll','auucc_gain','picp','mpiw','r2']:
uq_scoring_param=uq_scoring_param
else:
uq_scoring_param='picp'
uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params)
# this will fit both the base and the meta model
uqmodel_fit = uq_model.fit(X_train, y_train)
y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test)
y_hat_total_mean=np.mean(y_hat)
y_hat_lb_total_mean=np.mean(y_hat_lb)
y_hat_ub_total_mean=np.mean(y_hat_ub)
mpiw_20_per=(y_hat_total_mean*20/100)
mpiw_lower_range = y_hat_total_mean - mpiw_20_per
mpiw_upper_range = y_hat_total_mean + mpiw_20_per
from uq360.metrics import picp, mpiw
observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub)
observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub)
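# PICP is the fraction of y_test values falling inside [y_hat_lb, y_hat_ub];
# MPIW is the mean width of those prediction intervals (wider => less certain).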
observed_alphas_picp=round(observed_alphas_picp,2)
observed_widths_mpiw=round(observed_widths_mpiw,2)
picp_percentage= round(observed_alphas_picp*100)
Uncertainty_percentage=round(100-picp_percentage)
self.log.info('Model total observed_widths_mpiw : '+str(observed_widths_mpiw))
self.log.info('Model mpiw_lower_range : '+str(mpiw_lower_range))
self.log.info('Model mpiw_upper_range : '+str(mpiw_upper_range))
self.log.info('Model total picp_percentage : '+str(picp_percentage))
return observed_alphas_picp,observed_widths_mpiw,picp_percentage,Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range
def display_results(self,X_test, y_test, y_mean, y_lower, y_upper):
try:
global x_feature,y_feature
if (isinstance(self.selectedfeature, list) or isinstance(self.selectedfeature, tuple)):
x_feature=''.join(map(str, self.selectedfeature))
else:
x_feature= str(self.selectedfeature)
# self.selectedfeature=str(self.selectedfeature)
X_test=np.squeeze(X_test)
y_feature=str(self.targetFeature)
pred_dict = {x_feature: X_test,
'y': y_test,
'y_mean': y_mean,
'y_upper': y_upper,
'y_lower': y_lower
}
pred_df = pd.DataFrame(data=pred_dict)
pred_df_sorted = pred_df.sort_values(by=x_feature)
plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y'], 'o', label='Observed')
plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y_mean'], '-', lw=2, label='Predicted')
plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y_upper'], 'r--', lw=2, label='Upper Bound')
plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y_lower'], 'r--', lw=2, label='Lower Bound')
plt.legend()
plt.xlabel(x_feature)
plt.ylabel(y_feature)
plt.title('UQ Confidence Interval Plot.')
# plt.savefig('uq_test_plt.png')
if os.path.exists(str(self.uqgraphlocation)+'/uq_test_plt.png'):
os.remove(str(self.uqgraphlocation)+'/uq_test_plt.png')
plt.savefig(str(self.Deployment)+'/uq_test_plt.png')
plt.savefig(str(self.uqgraphlocation)+'/uq_test_plt.png')
plt.clf()
plt.cla()
plt.close()
pltreg=plot_picp_by_feature(X_test, y_test,
y_lower, y_upper,
xlabel=x_feature)
#pltreg.savefig('x.png')
pltr=pltreg.figure
if os.path.exists(str(self.uqgraphlocation)+'/picp_per_feature.png'):
os.remove(str(self.uqgraphlocation)+'/picp_per_feature.png')
pltr.savefig(str(self.Deployment)+'/picp_per_feature.png')
pltr.savefig(str(self.uqgraphlocation)+'/picp_per_feature.png')
plt.clf()
plt.cla()
plt.close()
except Exception as e:
# #print("display exception: \\n",e)
self.log.info('<!------------- UQ model Display Error ---------------> '+str(e))
def classUncertainty(self,pred,score):
try:
outuq = {}
classes = np.unique(pred)
for c in classes:
ids = pred == c
class_score = score[ids]
predc = 'Class_'+str(c)
outuq[predc]=np.mean(class_score)
x = np.mean(class_score)
#Uncertaininty in percentage
x=x*100
self.log.info('----------------> Class '+str(c)+' Confidence Score '+str(round(x)))
return outuq
except Exception as e:
# #print("display exception: \\n",e)
self.log.info('<!------------- UQ classUncertainty Error ---------------> '+str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def uqMain_BBMClassification(self,x_train, x_test, y_train, y_test,aionstatus):
try:
# print("Inside uqMain_BBMClassification\\n")
# print("lenth of x_train {}, x_test {}, y_train {}, y_test {}".format(x_train, x_test, y_train, y_test))
aionstatus = str(aionstatus)
if (aionstatus.lower() == 'aionuq'):
X_train, X_test, y_train, y_test = x_train, x_test, y_train, y_test
else:
X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.3, random_state=0)
from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelClassification
from uq360.metrics.classification_metrics import plot_reliability_diagram,area_under_risk_rejection_rate_curve,plot_risk_vs_rejection_rate,expected_calibration_error,compute_classification_metrics
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from sklearn.neighbors import KNeighborsClassifier
base_modelname=__class__.__name__
base_config = self.uqconfig_base
meta_config = self.uqconfig_base
model_name=self.basemodel.__class__.__name__
#print(model_name)
try:
#getting the features used by the base model
model_used_features=self.basemodel.feature_names_in_
self.log.info("Base model used training features are (UQ Testing): \\n"+str(model_used_features))
except:
pass
model_params=self.basemodel.get_params()
uq_scoring_param='accuracy'
basemodel=None
if (model_name == "GradientBoostingClassifier"):
basemodel=GradientBoostingClassifier
elif (model_name == "SGDClassifier"):
basemodel=SGDClassifier
elif (model_name == "GaussianNB"):
basemodel=GaussianNB
elif (model_name == "DecisionTreeClassifier"):
basemodel=DecisionTreeClassifier
elif(model_name == "RandomForestClassifier"):
basemodel=RandomForestClassifier
elif (model_name == "SVC"):
basemodel=SVC
elif(model_name == "KNeighborsClassifier"):
basemodel=KNeighborsClassifier
elif(model_name.lower() == "logisticregression"):
basemodel=LogisticRegression
elif(model_name == "XGBClassifier"):
basemodel=XGBClassifier
elif(model_name == "LGBMClassifier"):
basemodel=LGBMClassifier
else:
basemodel=LogisticRegression
calibrated_mdl=None
if (model_name == "SVC"):
from sklearn.calibration import CalibratedClassifierCV
basemodel=SVC(**model_params)
calibrated_mdl = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3)
calibrated_mdl.fit(X_train, y_train)
basepredict = calibrated_mdl.predict(X_test)
predprob_base = calibrated_mdl.predict_proba(X_test)[:, :]
elif (model_name == "SGDClassifier"):
from sklearn.calibration import CalibratedClassifierCV
basemodel=SGDClassifier(**model_params)
calibrated_mdl = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3)
calibrated_mdl.fit(X_train, y_train)
basepredict = calibrated_mdl.predict(X_test)
predprob_base = calibrated_mdl.predict_proba(X_test)[:, :]
else:
from sklearn.calibration import CalibratedClassifierCV
base_mdl = basemodel(**model_params)
calibrated_mdl = CalibratedClassifierCV(base_mdl,method='sigmoid',cv=3)
basemodelfit = calibrated_mdl.fit(X_train, y_train)
basepredict = calibrated_mdl.predict(X_test)
predprob_base=calibrated_mdl.predict_proba(X_test)[:, :]
cal_model_params=calibrated_mdl.get_params()
acc_score_base=accuracy_score(y_test, basepredict)
base_estimator_calibrate = cal_model_params['base_estimator']
uq_model = BlackboxMetamodelClassification(base_model=self.basemodel, meta_model=basemodel,
base_config=model_params, meta_config=model_params)
try:
X_train=X_train[model_used_features]
X_test=X_test[model_used_features]
except:
pass
uqmodel_fit = uq_model.fit(X_train, y_train,base_is_prefitted=True,meta_train_data=(X_train, y_train))
# uqmodel_fit = uq_model.fit(X_train, y_train)
y_t_pred, y_t_score = uq_model.predict(X_test)
acc_score=accuracy_score(y_test, y_t_pred)
test_accuracy_perc=round(100*acc_score)
if(aionstatus == "aionuq"):
test_accuracy_perc=round(test_accuracy_perc,2)
#uq_aurrrc is not used by any AION GUI configuration, so it is initialized to 0. Calling area_under_risk_rejection_rate_curve() displays a plot in the command prompt and interrupts execution, so it is skipped here.
uq_aurrrc=0
pass
else:
bbm_c_plot = plot_risk_vs_rejection_rate(
y_true=y_test,
y_prob=predprob_base,
selection_scores=y_t_score,
y_pred=y_t_pred,
plot_label=['UQ_risk_vs_rejection'],
risk_func=accuracy_score,
num_bins = 10 )
# This done by kiran, need to uncomment for GUI integration.
# bbm_c_plot_sub = bbm_c_plot[4]
bbm_c_plot_sub = bbm_c_plot
if os.path.exists(str(self.uqgraphlocation)+'/plot_risk_vs_rejection_rate.png'):
os.remove(str(self.uqgraphlocation)+'/plot_risk_vs_rejection_rate.png')
# bbm_c_plot_sub.savefig(str(self.uqgraphlocation)+'/plot_risk_vs_rejection_rate.png')
re_plot=plot_reliability_diagram(y_true=y_test,
y_prob=predprob_base,
y_pred=y_t_pred,
plot_label=['UQModel reliability_diagram'],
num_bins=10 )
# This done by kiran, need to uncomment for GUI integration.
# re_plot_sub = re_plot[4]
re_plot_sub = re_plot
if os.path.exists(str(self.uqgraphlocation)+'/plot_reliability_diagram.png'):
os.remove(str(self.uqgraphlocation)+'/plot_reliability_diagram.png')
# re_plot_sub.savefig(str(DEFAULT_FILE_PATH)+'/plot_reliability_diagram.png')
uq_aurrrc=area_under_risk_rejection_rate_curve( y_true=y_test,
y_prob=predprob_base,
y_pred=y_t_pred,
selection_scores=y_t_score,
attributes=None,
risk_func=accuracy_score,subgroup_ids=None, return_counts=False,
num_bins=10)
uq_aurrrc=uq_aurrrc
test_accuracy_perc=round(test_accuracy_perc)
#metric_all=compute_classification_metrics(y_test, y_prob, option='all')
metric_all=compute_classification_metrics(y_test, predprob_base, option='accuracy')
#expected_calibration_error
uq_ece=expected_calibration_error(y_test, y_prob=predprob_base,y_pred=basepredict, num_bins=10, return_counts=False)
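# ECE bins predictions by confidence and averages |accuracy - confidence| per
# bin; the heuristic below treats (base accuracy - ECE) as a confidence score.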
# uq_aurrrc=uq_aurrrc
confidence_score=acc_score_base-uq_ece
ece_confidence_score=round(confidence_score,2)
# Model uncertainty using ECE score
# model_uncertainty_ece = 1-ece_confidence_score
#Uncertainty Using model inherent predict probability
mean_predprob_total=np.mean(y_t_score)
model_confidence=mean_predprob_total
model_uncertainty = 1-mean_predprob_total
model_confidence = round(model_confidence,2)
# To get each class values and uncertainty
if (aionstatus.lower() == 'aionuq'):
y_val = np.append(y_train,y_test)
else:
y_val = self.y
self.log.info('------------------> Model Confidence Score '+str(model_confidence))
outuq = self.classUncertainty(y_t_pred,y_t_score)
# Another way to get conf score
model_uncertainty_per=round((model_uncertainty*100),2)
model_confidence_per=round((model_confidence*100),2)
acc_score_per = round((acc_score*100),2)
uq_ece_per=round((uq_ece*100),2)
output={}
recommendation = ""
if (uq_ece > 0.5):
# RED text
recommendation = 'Model has high ece (expected calibration error) score compare to threshold (0.5),not good to be deploy. need to be add more input data across all feature ranges to train base model, also try with different classification algorithms/ |
ensembling to reduce ECE (ECE~0).'
else:
# self.log.info('Model has good ECE score and accuracy, ready to deploy.\\n.')
if (uq_ece <= 0.1 and model_confidence >= 0.9):
# Green Text
recommendation = 'Model has the best calibration score (near 0) and a good confidence score; ready to deploy.'
else:
# Orange
recommendation = 'Model has a good ECE score (between 0.1 and 0.5) but a confidence score below the threshold (90%). The model can be improved by adding more input data across all feature ranges and evaluating different algorithms/ensembling.'
#Adding each class uncertainty value
classoutput = {}
for k,v in outuq.items():
classoutput[k]=(str(round((v*100),2)))
output['classes'] = classoutput
output['ModelConfidenceScore']=(str(model_confidence_per))
output['ExpectedCalibrationError']=str(uq_ece_per)
output['ModelUncertainty']=str(model_uncertainty_per)
output['Recommendation']=recommendation
# output['user_msg']='Please check the plot for more understanding of model uncertainty'
#output['UQ_area_under_risk_rejection_rate_curve']=round(uq_aurrrc,4)
output['Accuracy']=str(acc_score_per)
output['Problem']= 'Classification'
#self.log.info('Model Accuracy score in percentage : '+str(test_accuracy_perc)+str(' %'))
# #print("Prediction mean for the given model:",np.mean(y_hat),"\\n")
#self.log.info(recommendation)
#self.log.info("Model_confidence_score: " +str(confidence_score))
#self.log.info("Model_uncertainty: " +str(round(model_uncertainty,2)))
#self.log.info('Please check the plot for more understanding of model uncertainty.\\n.')
uq_jsonobject = json.dumps(output)
with open(str(self.Deployment)+"/uq_classification_log.json", "w") as f:
json.dump(output, f)
return test_accuracy_perc,uq_ece,output,model_confidence_per,model_uncertainty_per
except Exception as inst:
self.log.info('\\n < ---------- UQ Model Execution Failed Start--------->')
self.log.info('\\n<------Model Execution failed!!!.' + str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
self.log.info('\\n < ---------- Model Execution Failed End --------->')
def aion_confidence_plot(self,df):
df=df
df = df.sort_values(by=self.selectedfeature)
best_values=df.Best_values.to_list()
best_upper=df.Best__upper.to_list()
best_lower=df.Best__lower.to_list()
Total_Upper_PI=df.Total_Upper_PI.to_list()
Total_Low_PI=df.Total_Low_PI.to_list()
Obseved = df.Observed.to_list()
plt.plot(df[x_feature], df['Observed'], 'o', label='Observed')
plt.plot(df[x_feature], df['Best__upper'],'r--', lw=2, color='grey')
plt.plot(df[x_feature], df['Best__lower'],'r--', lw=2, color='grey')
plt.plot(df[x_feature], df['Best_values'], 'r--', lw=2, label='MeanPrediction',color='red')
plt.fill_between(df[x_feature], Total_Upper_PI, Total_Low_PI, label='Good Confidence', color='lightblue', alpha=.5)
plt.fill_between(df[x_feature],best_lower, best_upper,label='Best Confidence', color='orange', alpha=.5)
plt.legend()
plt.xlabel(self.selectedfeature)
plt.ylabel(self.targetFeature)
plt.title('UQ Best & Good Area Plot')
if os.path.exists(str(self.uqgraphlocation)+'/uq_confidence_plt.png'):
os.remove(str(self.uqgraphlocation)+'/uq_confidence_plt.png')
plt.savefig(str(self.uqgraphlocation)+'/uq_confidence_plt.png')
plt.savefig(str(self.Deployment)+'/uq_confidence_plt.png')
def uqMain_BBMRegression(self,x_train, x_test, y_train, y_test,aionstatus):
aionstatus = str(aionstatus)
# if (aionstatus.lower() == 'aionuq'):
# X_train, X_test, y_train, y_test = x_train, x_test, y_train, y_test
# total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,x_train, x_test, y_train, y_test,aionstatus)
# else:
# X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.3, random_state=0)
# modelName = ""
self.log.info('<!------------- Inside BlackBox MetaModel Regression process. ---------------> ')
try:
from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression
import pandas as pd
base_modelname=__class__.__name__
base_config = self.uqconfig_base
meta_config = self.uqconfig_base
model_name=self.basemodel.__class__.__name__
model_params=self.basemodel.get_params()
# #print("model_params['criterion']: \\n",model_params['criterion'])
key = 'criterion'
#if key in model_params:
try:
#if model_params.has_key(key):
if key in model_params:
if (model_params['criterion']):
uq_scoring_param=model_params.get('criterion')
elif(model_params['criterion'] == None):
uq_scoring_param='picp'
else:
uq_scoring_param='picp'
else:
uq_scoring_param='picp'
pass
except Exception as inst:
uq_scoring_param='picp'
# modelname='sklearn.linear_model'+'.'+model_name
# X_train, X_test, y_train, y_test = self.xtrain,self.xtest,self.ytrain,self.ytest
#Geeting trained model name and to use the model in BlackboxMetamodelRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression,Lasso,Ridge
from sklearn.ensemble import RandomForestRegressor
if (model_name == "DecisionTreeRegressor"):
basemodel=DecisionTreeRegressor
elif (model_name == "LinearRegression"):
basemodel=LinearRegression
elif (model_name == "Lasso"):
basemodel=Lasso
elif (model_name == "Ridge"):
basemodel=Ridge
elif(model_name == "RandomForestRegressor"):
basemodel=RandomForestRegressor
else:
basemodel=LinearRegression
if (aionstatus.lower() == 'aionuq'):
X_train, X_test, y_train, y_test = x_train, x_test, y_train, y_test
total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,x_train, x_test, y_train, y_test,aionstatus)
else:
X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.3, random_state=0)
total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,None, None, None, None,aionstatus)
if uq_scoring_param in ['rmse', 'nll','auucc_gain','picp','mpiw','r2']:
uq_scoring_param=uq_scoring_param
else:
uq_scoring_param='picp'
uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params)
# this will fit both the base and the meta model
uqmodel_fit = uq_model.fit(X_train, y_train)
# #print("X_train.shape: \\n",X_train.shape)
y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test)
from uq360.metrics import picp, mpiw
observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub)
observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub)
picp_percentage= round(observed_alphas_picp*100)
Uncertainty_percentage=round(100-picp_percentage)
self.log.info('<!------------- observed_picp: ---------------> '+str(observed_alphas_picp))
self.log.info('<!------------- observed_widths_mpiw: ---------------> '+str(observed_widths_mpiw))
# UQ metamodel regression have metrics as follows, “rmse”, “nll”, “auucc_gain”, “picp”, “mpiw”, “r2”
#metric_all=compute_regression_metrics(y_test, y_hat,y_hat_lb, y_hat_ub,option='all',nll_fn=None) #nll - Gaussian negative log likelihood loss.
metric_all=compute_regression_metrics(y_test, y_hat,y_hat_lb, y_hat_ub,option=uq_scoring_param,nll_fn=None)
metric_used=''
for k,v in metric_all.items():
metric_used=str(round(v,2))
self.log.info('<!------------- Metric used for regression UQ: ---------------> '+str(metric_all))
# Determine the confidence level and recommentation to the tester
# test_data=y_test
observed_alphas_picp=round(observed_alphas_picp,2)
observed_widths_mpiw=round(observed_widths_mpiw,2)
#Calculate total uncertainty for all features
# total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage = self.totalUncertainty(self.data)
# df1=self.data
total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,x_train, x_test, y_train, y_test,aionstatus)
recommendation=""
output={}
if (observed_alphas_picp >= 0.95 and total_picp >= 0.75):
# Add GREEN text
self.log.info('Model has good confidence for the selected feature, ready to deploy.\\n.')
recommendation = "Model has good confidence score, ready to deploy."
elif ((observed_alphas_picp >= 0.50 and observed_alphas_picp <= 0.95) and (total_picp >= 0.50)):
# Orange
recommendation = "Model has average confidence compare to threshold (95%), need to be add more input data across all feature ranges to train base model, also try with different regression algorithms/ensembling."
self.log.info('Model has average confidence score compare to threshold, need to be add more input data for training base model and again try with UQ .')
else:
# RED text
recommendation = "Model has less confidence compare to threshold (95%), need to be add more input data across all feature ranges to train base model, also try with different regression algorithms/ensembling."
self.log.info('Model has less confidence score compare to threshold, need to be add more input data for training base model and again try with UQ .')
#Build uq json info dict
output['ModelConfidenceScore']=(str(total_picp_percentage)+'%')
output['ModelUncertainty']=(str(total_Uncertainty_percentage)+'%')
output['SelectedFeatureConfidence']=(str(picp_percentage)+'%')
output['SelectedFeatureUncertainty']=(str(Uncertainty_percentage)+'%')
output['PredictionIntervalCoverageProbability']=observed_alphas_picp
output['MeanPredictionIntervalWidth']=round(observed_widths_mpiw)
output['DesirableMPIWRange']=(str(round(mpiw_lower_range))+str(' - ')+str(round(mpiw_upper_range)))
output['Recommendation']=str(recommendation)
output['Metric']=uq_scoring_param
output['Score']=metric_used
output['Problemtype']= 'Regression'
self.log.info('Model confidence in percentage is: '+str(picp_percentage)+str(' %'))
self.log.info('Model Uncertainty is:: '+str(Uncertainty_percentage)+str(' %'))
#self.log.info('Please check the plot for more understanding of model uncertainty.\\n.')
#self.display_results(X_test, y_test, y_mean=y_hat, y_lower=y_hat_lb, y_upper=y_hat_ub)
uq_jsonobject = json.dumps(output)
with open(str(self.Deployment)+"/uq_reg_log.json", "w") as f:
json.dump(output, f)
#To get best and medium UQ range of values from total predict interval
y_hat_m=y_hat.tolist()
y_hat_lb=y_hat_lb.tolist()
upper_bound=y_hat_ub.tolist()
y_hat_ub=y_hat_ub.tolist()
for x in y_hat_lb:
y_hat_ub.append(x)
total_pi=y_hat_ub
medium_UQ_range = y_hat_ub
best_UQ_range= y_hat.tolist()
ymean_upper=[]
ymean_lower=[]
y_hat_m=y_hat.tolist()
for i in y_hat_m:
y_hat_m_range= (i*20/100)
x=i+y_hat_m_range
y=i-y_hat_m_range
ymean_upper.append(x)
ymean_lower.append(y)
min_best_uq_dist=round(min(best_UQ_range))
max_best_uq_dist=round(max(best_UQ_range))
# initializing ranges
list_medium=list(filter(lambda x:not(min_best_uq_dist<=x<=max_best_uq_dist), total_pi))
list_best = y_hat_m
X_test = np.squeeze(X_test)
'''
uq_dict = {x_feature:X_test,'Observed':y_test,'Best_values': y_hat_m,
'Best__upper':ymean_upper,
'Best__lower':ymean_lower,
'Total_Low_PI': y_hat_lb,
'Total_Upper_PI': upper_bound,
}
print(uq_dict)
uq_pred_df = pd.DataFrame(data=uq_dict)
uq_pred_df_sorted = uq_pred_df.sort_values(by='Best_values')
uq_pred_df_sorted.to_csv(str(self.Deployment)+"/uq_pred_df.csv",index = False)
csv_path=str(self.Deployment)+"/uq_pred_df.csv"
df=pd.read_csv(csv_path)
self.log.info('uqMain() returns: observed_alphas_picp,observed_widths_mpiw,list_medium,list_best,metric_all.\\n.')
#Callconfidence olot fn only for UQTest interface
if (aionstatus.lower() == 'aionuq'):
#No need to showcase confidence plot for aion main
pass
else:
self.aion_confidence_plot(df)
'''
return total_picp_percentage,total_Uncertainty_percentage,list_medium,list_best,metric_all,json.loads(uq_jsonobject)
except Exception as inst:
exc = {"status":"FAIL","message":str(inst).strip('"')}
out_exc = json.dumps(exc)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import logging
logging.getLogger('tensorflow').disabled = True
import json
#from nltk.corpus import stopwords
from collections import Counter
from matplotlib import pyplot
import sys
import os
import matplotlib.pyplot as plt
from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression
from sklearn import datasets
from sklearn.model_selection import train_test_split
import pandas as pd
from uq360.metrics.regression_metrics import compute_regression_metrics
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_curve
from sklearn.metrics import r2_score,mean_squared_error, explained_variance_score,mean_absolute_error
from uq360.metrics import plot_uncertainty_by_feature, plot_picp_by_feature
import sys
import time
from sklearn.metrics import confusion_matrix
from pathlib import Path
import logging
import logging.config
from os.path import expanduser
import platform
from sklearn.utils import shuffle
class aionUQ:
# def __init__(self,uqdf,targetFeature,xtrain,ytrain,xtest,ytest,uqconfig_base,uqconfig_meta,deployLocation,saved_model):
def __init__(self,df,dfp,target,ProblemName,Params,model,modelfeatures,targetfeature):
try:
self.data=df
self.dfFeatures=dfp
self.uqconfig_base=Params
self.uqconfig_meta=Params
self.targetFeature=targetfeature
self.log = logging.getLogger('aionUQ')
self.target=target
self.selectedfeature=modelfeatures
self.y=self.target
self.X=self.dfFeatures
from appbe.dataPath import DEPLOY_LOCATION
self.Deployment = os.path.join(DEPLOY_LOCATION,('UQTEST_'+str(int(time.time()))))
os.makedirs(self.Deployment,exist_ok=True)
self.basemodel=model
self.model_name=ProblemName
# self.X, self.y = shuffle(self.X, self.y)
X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.2, random_state=0)
self.xtrain = X_train
self.xtest = X_test
self.ytrain = y_train
self.ytest = y_test
# self.deployLocation=deployLocation
except Exception as e:
# self.log.info('<!------------- UQ model INIT Error ---------------> '+str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
# self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def totalUncertainty(self,df,basemodel,model_params):
try:
# from sklearn.model_selection import train_test_split
# df=self.data
# y=df[self.targetFeature]
# X = df.drop(self.targetFeature, axis=1)
if (isinstance(self.selectedfeature,list)):
selectedfeature=[self.selectedfeature[0]]
selectedfeature=' '.join(map(str,selectedfeature))
if (isinstance(self.targetFeature,list)):
targetFeature=[self.targetFeature[0]]
targetFeature=' '.join(map(str,targetFeature))
X = self.data[selectedfeature]
y = self.data[targetFeature]
X = X.values.reshape((-1,1))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
key = 'criterion'
#if key in model_params:
try:
#if model_params.has_key(key):
if key in model_params:
if (model_params['criterion']):
uq_scoring_param=model_params.get('criterion')
elif(model_params['criterion'] == None):
uq_scoring_param='picp'
else:
uq_scoring_param='picp'
else:
uq_scoring_param='picp'
pass
except Exception as inst:
uq_scoring_param='picp'
# from sklearn.tree import DecisionTreeRegressor
# from sklearn.linear_model import LinearRegression,Lasso,Ridge
# from sklearn import linear_model
# from sklearn.ensemble import RandomForestRegressor
if uq_scoring_param in ['rmse', 'nll','auucc_gain','picp','mpiw','r2']:
uq_scoring_param=uq_scoring_param
else:
uq_scoring_param='picp'
uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params)
# this will fit both the base and the meta model
uqmodel_fit = uq_model.fit(X_train, y_train)
y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test)
y_hat_total_mean=np.mean(y_hat)
y_hat_lb_total_mean=np.mean(y_hat_lb)
y_hat_ub_total_mean=np.mean(y_hat_ub)
mpiw_20_per=(y_hat_total_mean*20/100)
mpiw_lower_range = y_hat_total_mean - mpiw_20_per
mpiw_upper_range = y_hat_total_mean + mpiw_20_per
from uq360.metrics import picp, mpiw
observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub)
observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub)
observed_alphas_picp=round(observed_alphas_picp,2)
observed_widths_mpiw=round(observed_widths_mpiw,2)
picp_percentage= round(observed_alphas_picp*100)
Uncertainty_percentage=round(100-picp_percentage)
# self.log.info('Model total observed_widths_mpiw : '+str(observed_widths_mpiw))
# self.log.info('Model mpiw_lower_range : '+str(mpiw_lower_range))
# self.log.info('Model mpiw_upper_range : '+str(mpiw_upper_range))
# self.log.info('Model total picp_percentage : '+str(picp_percentage))
except Exception as e:
print("totalUncertainty fn error: \\n",e)
return observed_alphas_picp,observed_widths_mpiw,picp_percentage,Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range
def display_results(self,X_test, y_test, y_mean, y_lower, y_upper):
try:
global x_feature,y_feature
if (isinstance(self.selectedfeature, list) or isinstance(self.selectedfeature, tuple)):
x_feature=','.join(map(str, self.selectedfeature))
else:
x_feature= str(self.selectedfeature)
# self.selectedfeature=str(self.selectedfeature)
X_test=np.squeeze(X_test)
y_feature=str(self.targetFeature)
pred_dict = {x_feature: X_test,
'y': y_test,
'y_mean': y_mean,
'y_upper': y_upper,
'y_lower': y_lower
}
pred_df = pd.DataFrame(data=pred_dict)
x_feature1 = x_feature.split(',')
pred_df_sorted = pred_df.sort_values(by=x_feature1)
plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y'], 'o', label='Observed')
plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y_mean'], '-', lw=2, label='Predicted')
plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y_upper'], 'r--', lw=2, label='Upper Bound')
plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y_lower'], 'r--', lw=2, label='Lower Bound')
plt.legend()
plt.xlabel(x_feature1[0])
plt.ylabel(y_feature)
plt.title('UQ Confidence Interval Plot.')
# plt.savefig('uq_test_plt.png')
'''
if os.path.exists(str(DEFAULT_FILE_PATH)+'/uq_test_plt.png'):
os.remove(str(DEFAULT_FILE_PATH)+'/uq_test_plt.png')
'''
plt.savefig(str(self.Deployment)+'/uq_test_plt.png')
#plt.savefig(str(DEFAULT_FILE_PATH)+'/uq_test_plt.png')
confidencePlot = os.path.join(self.Deployment,'picp_per_feature.png')
plt.clf()
plt.cla()
plt.close()
pltreg=plot_picp_by_feature(X_test, y_test,
y_lower, y_upper,
xlabel=x_feature)
#pltreg.savefig('x.png')
pltr=pltreg.figure
'''
if os.path.exists(str(DEFAULT_FILE_PATH)+'/picp_per_feature.png'):
os.remove(str(DEFAULT_FILE_PATH)+'/picp_per_feature.png')
'''
pltr.savefig(str(self.Deployment)+'/picp_per_feature.png')
picpPlot = os.path.join(self.Deployment,'picp_per_feature.png')
#pltr.savefig(str(DEFAULT_FILE_PATH)+'/picp_per_feature.png')
plt.clf()
plt.cla()
plt.close()
except Exception as e:
print("display exception: \\n",e)
# self.log.info('<!------------- UQ model Display Error ---------------> '+str(e))
return confidencePlot,picpPlot
def classUncertainty(self,predprob_base):
#Per-class uncertainty: 1 - mean predicted probability of that class over the test set.
#The class label k is used directly as the probability column index, so labels are assumed to be label-encoded as 0..n-1.
classes = np.unique(self.y)
outuq={}
for k in classes:
mean_predprob_class=np.mean(predprob_base[:,int(k)])
uncertainty=1-mean_predprob_class
outuq['Class_'+str(k)+'_Uncertainty']=uncertainty
return outuq
def uqMain_BBMClassification(self):
# self.log.info('<!------------- Inside BlackBox MetaModel Classification process. ---------------> ')
# import matplotlib.pyplot as plt
try:
from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelClassification
except:
##In latest UQ360, library changed from BlackboxMetamodelClassification to MetamodelClassification.
from uq360.algorithms.blackbox_metamodel import MetamodelClassification
# from uq360.metrics.classification_metrics import area_under_risk_rejection_rate_curve,plot_risk_vs_rejection_rate,expected_calibration_error,compute_classification_metrics
from uq360.metrics.classification_metrics import plot_reliability_diagram,area_under_risk_rejection_rate_curve,plot_risk_vs_rejection_rate,expected_calibration_error,compute_classification_metrics
# from sklearn import datasets
# from sklearn.model_selection import train_test_split
# from sklearn.metrics import accuracy_score
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
# from sklearn.linear_model import LogisticRegression
# import pandas as pd
base_modelname=__class__.__name__
base_config = self.uqconfig_base
meta_config = self.uqconfig_base
model_name=self.basemodel.__class__.__name__
model_params=self.basemodel.get_params()
try:
#getting the features the fitted base model was trained on; feature_names_in_ is only set when the estimator was fitted on a DataFrame
model_used_features=self.basemodel.feature_names_in_
except:
pass
X_train, X_test, y_train, y_test = self.xtrain,self.xtest,self.ytrain,self.ytest
uq_scoring_param='accuracy'
basemodel=None
if (model_name == "GradientBoostingClassifier"):
basemodel=GradientBoostingClassifier
elif (model_name == "SGDClassifier"):
basemodel=SGDClassifier
elif (model_name == "GaussianNB"):
basemodel=GaussianNB
elif (model_name == "DecisionTreeClassifier"):
basemodel=DecisionTreeClassifier
elif(model_name == "RandomForestClassifier"):
basemodel=RandomForestClassifier
elif (model_name == "SVC"):
basemodel=SVC
elif(model_name == "KNeighborsClassifier"):
basemodel=KNeighborsClassifier
elif(model_name == "LogisticRegression"):
basemodel=LogisticRegression
else:
basemodel=LogisticRegression
try:
try:
##Removed meta_config because leave meta model config as default ml model params
uq_model = BlackboxMetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params)
except:
uq_model = BlackboxMetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params, meta_config=model_params)
except:
##In latest version BlackboxMetamodelClassification name modified as MetamodelClassification
try:
##Removed meta_config because leave meta model config as default ml model params
uq_model = MetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params)
except:
uq_model = MetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params, meta_config=model_params)
# this will fit both the base and the meta model
try:
X_train=X_train[model_used_features]
X_test=X_test[model_used_features]
except:
pass
uqmodel_fit = uq_model.fit(X_train, y_train,base_is_prefitted=True,meta_train_data=(X_train, y_train))
# uqmodel_fit = uq_model.fit(X_train, y_train)
#Test data pred, score
y_t_pred, y_t_score = uq_model.predict(X_test)
#predict probability
# uq_pred_prob=uq_model.predict_proba(X_test)
# predprob_base=basemodel.predict_proba(X_test)[:, :]
#if (model_name == "SVC" or model_name == "SGDClassifier"):
# if model_name in ['SVC','SGDClassifier']:
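#SVC (without probability=True) and hinge-loss SGDClassifier expose no usable predict_proba, so Platt scaling via CalibratedClassifierCV supplies the calibrated probabilities required by the metrics below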
if (model_name == "SVC"):
from sklearn.calibration import CalibratedClassifierCV
basemodel=SVC(**model_params)
calibrated_svc = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3)
calibrated_svc.fit(X_train, y_train)
#predict through the calibrated wrapper; the bare SVC instance above is never fitted
basepredict = calibrated_svc.predict(X_test)
predprob_base = calibrated_svc.predict_proba(X_test)[:, :]
elif (model_name == "SGDClassifier"):
from sklearn.calibration import CalibratedClassifierCV
basemodel=SGDClassifier(**model_params)
calibrated_svc = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3)
calibrated_svc.fit(X_train, y_train)
#predict through the calibrated wrapper; the bare SGDClassifier instance above is never fitted
basepredict = calibrated_svc.predict(X_test)
predprob_base = calibrated_svc.predict_proba(X_test)[:, :]
else:
base_mdl = basemodel(**model_params)
basemodelfit = base_mdl.fit(X_train, y_train)
basepredict = base_mdl.predict(X_test)
predprob_base=base_mdl.predict_proba(X_test)[:, :]
acc_score=accuracy_score(y_test, y_t_pred)
test_accuracy_perc=round(100*acc_score)
'''
bbm_c_plot = plot_risk_vs_rejection_rate(
y_true=y_test,
y_prob=predprob_base,
selection_scores=y_t_score,
y_pred=y_t_pred,
plot_label=['UQ_risk_vs_rejection'],
risk_func=accuracy_score,
num_bins = 10 )
# This done by kiran, need to uncomment for GUI integration.
try:
bbm_c_plot_sub = bbm_c_plot[4]
bbm_c_plot.savefig(str(self.Deployment)+'/plot_risk_vs_rejection_rate.png')
riskPlot = os.path.join(self.Deployment,'plot_risk_vs_rejection_rate.png')
except Exception as e:
print(e)
pass
riskPlot = ''
'''
riskPlot = ''
'''
try:
re_plot=plot_reliability_diagram(y_true=y_test,
y_prob=predprob_base,
y_pred=y_t_pred,
plot_label=['UQModel reliability_diagram'],
num_bins=10)
# This done by kiran, need to uncomment for GUI integration.
re_plot_sub = re_plot[4]
# re_plot_sub = re_plot
re_plot_sub.savefig(str(self.Deployment)+'/plot_reliability_diagram.png')
reliability_plot = os.path.join(self.Deployment,'plot_reliability_diagram.png')
except Exception as e:
print(e)
pass
reliability_plot = ''
'''
reliability_plot = ''
uq_aurrrc=area_under_risk_rejection_rate_curve( y_true=y_test,
y_prob=predprob_base,
y_pred=y_t_pred,
selection_scores=y_t_score,
attributes=None,
risk_func=accuracy_score,subgroup_ids=None, return_counts=False,
num_bins=10)
#metric_all=compute_classification_metrics(y_test, y_prob, option='all')
metric_all=compute_classification_metrics(y_test, predprob_base, option='accuracy')
#expected_calibration_error
uq_ece=expected_calibration_error(y_test, y_prob=predprob_base,y_pred=y_t_pred, num_bins=10, return_counts=False)
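#ECE-adjusted confidence: accuracy minus expected calibration error, so a well-calibrated model retains most of its accuracy as confidence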
confidence_score=acc_score-uq_ece
ece_confidence_score=round(confidence_score,2)
# Model uncertainty using ECE score
# model_uncertainty_ece = 1-ece_confidence_score
# #print("model_uncertainty1: \\n",model_uncertainty_ece)
#Uncertainty Using model inherent predict probability
mean_predprob_total=np.mean(predprob_base)
model_uncertainty = 1-mean_predprob_total
model_confidence=mean_predprob_total
model_confidence = round(model_confidence,2)
# To get each class values and uncertainty
outuq = self.classUncertainty(predprob_base)
# Another way to get conf score
model_uncertainty_per=round((model_uncertainty*100),2)
# model_confidence_per=round((model_confidence*100),2)
model_confidence_per=round((ece_confidence_score*100),2)
acc_score_per = round((acc_score*100),2)
uq_ece_per=round((uq_ece*100),2)
output={}
recommendati |
()
for x in y_hat_lb:
y_hat_ub.append(x)
total_pi=y_hat_ub
medium_UQ_range = y_hat_ub
best_UQ_range= y_hat.tolist()
ymean_upper=[]
ymean_lower=[]
y_hat_m=y_hat.tolist()
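#build a +/-20% band around each point prediction as the narrow "best" range; the metamodel's [lb, ub] interval remains the total prediction interval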
for i in y_hat_m:
y_hat_m_range= (i*20/100)
x=i+y_hat_m_range
y=i-y_hat_m_range
ymean_upper.append(x)
ymean_lower.append(y)
min_best_uq_dist=round(min(best_UQ_range))
max_best_uq_dist=round(max(best_UQ_range))
# initializing ranges
list_medium=list(filter(lambda x:not(min_best_uq_dist<=x<=max_best_uq_dist), total_pi))
list_best = y_hat_m
'''
print(X_test)
print(X_test)
X_test = np.squeeze(X_test)
print(x_feature)
'''
uq_dict = pd.DataFrame(X_test)
#print(uq_dict)
uq_dict['Observed'] = y_test
uq_dict['Best_values'] = y_hat_m
uq_dict['Best__upper'] = ymean_upper
uq_dict['Best__lower'] = ymean_lower
uq_dict['Total_Low_PI'] = y_hat_lb
uq_dict['Total_Upper_PI'] = upper_bound
'''
uq_dict = {x_feature:X_test,'Observed':y_test,'Best_values': y_hat_m,
'Best__upper':ymean_upper,
'Best__lower':ymean_lower,
'Total_Low_PI': y_hat_lb,
'Total_Upper_PI': upper_bound,
}'''
uq_pred_df = pd.DataFrame(data=uq_dict)
uq_pred_df_sorted = uq_pred_df.sort_values(by='Best_values')
uq_pred_df_sorted.to_csv(str(self.Deployment)+"/uq_pred_df.csv",index = False)
csv_path=str(self.Deployment)+"/uq_pred_df.csv"
df=pd.read_csv(csv_path)
# self.log.info('uqMain() returns: observed_alphas_picp,observed_widths_mpiw,list_medium,list_best,metric_all.\\n.')
# confidenceplot = self.aion_confidence_plot(df)
# output['Confidence Plot']= confidenceplot
uq_jsonobject = json.dumps(output)
print("UQ regression problem training completed...\\n")
return observed_alphas_picp,observed_widths_mpiw,list_medium,list_best,metric_all,uq_jsonobject
except Exception as inst:
print('-------',inst)
exc = {"status":"FAIL","message":str(inst).strip('"')}
out_exc = json.dumps(exc)
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
#System imports
import logging
import os
import sys
import pickle
#Sci-Tools imports
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from word2number import w2n
#river imports (river's stats module provides the Mean/Min/Max/Mode aggregates used below; importing scipy.stats under the same name would shadow it)
from river.preprocessing import StatImputer
from river import stats, compose, anomaly
class incProfiler():
def __init__(self):
self.DtypesDic={}
self.pandasNumericDtypes=['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
self.allNumberTypeCols = [] #all number type columns
self.allNumCols = [] #only numerical columns which includes num features and target if it is numerical
self.allCatCols = []
self.numFtrs = []
self.catFtrs = []
self.textFtrs = []
self.textVectorFtrs = []
self.numDiscreteCols = []
self.numContinuousCols = []
self.wordToNumericFeatures=[]
self.emptyCols=[]
self.missingCols = []
self.targetColumn = ""
self.le_dict = {}
self.configDict = {}
self.incFill = None
self.incLabelMapping = None
self.incCatEncoder = None
self.incScaler = None
self.incOutlierRem = None
self.log = logging.getLogger('eion')
def pickleDump(self, model, path):
if model is not None:
with open(path, 'wb') as f:
pickle.dump(model, f)
def saveProfilerModels(self, deployLocation):
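#the imputer is pickled only when river StatImputer objects are in use; the string strategies ('zero'/'drop') are re-applied from configDict at inference time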
if isinstance(self.incFill['num_fill'], StatImputer) or isinstance(self.incFill['cat_fill'], StatImputer):
self.pickleDump(self.incFill, os.path.join(deployLocation,'production','profiler','incFill.pkl'))
self.pickleDump(self.incLabelMapping, os.path.join(deployLocation,'production','profiler','incLabelMapping.pkl'))
self.pickleDump(self.incCatEncoder, os.path.join(deployLocation,'production','profiler','incCatEncoder.pkl'))
self.pickleDump(self.incScaler, os.path.join(deployLocation,'production','profiler','incScaler.pkl'))
self.pickleDump(self.incOutlierRem, os.path.join(deployLocation,'production','profiler','incOutlierRem.pkl'))
def featureAnalysis(self, df, conf_json, targetFeature):
try:
self.log.info('-------> Remove Duplicate Rows')
noOfDuplicateRows = df.duplicated(keep='first').sum()
df = df.drop_duplicates(keep="first")
df = df.reset_index(drop=True)
self.log.info('Status:- |... Duplicate row treatment done: '+str(noOfDuplicateRows))
self.log.info(df.head(5))
self.log.info( '\\n----------- Inspecting Features -----------')
ctn_count = 0
df = df.replace('-', np.nan)
df = df.replace('?', np.nan)
dataFDtypes=self.dataFramecolType(df)
numerical_ratio = float(conf_json['numericFeatureRatio'])
categoricalMaxLabel = int(conf_json['categoryMaxLabel'])
indexFeatures = []
numOfRows = df.shape[0]
dataCols = df.columns
for item in dataFDtypes:
if(item[1] == 'object'):
filteredDf,checkFlag = self.smartFilter(item[0],df,numerical_ratio)
if(checkFlag):
self.wordToNumericFeatures.append(item[0])
self.log.info('----------> Data Type Converting to numeric :Yes')
try:
df[item[0]]=filteredDf[item[0]].astype(float)
except:
pass
ctn_count = ctn_count+1
else:
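#sequence/index detection: count rows whose value equals the previous value + 1; if every row except the first increments, treat the column as a running index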
count = (df[item[0]] - df[item[0]].shift() == 1).sum()
if((numOfRows - count) == 1):
self.log.info( '-------> Feature :'+str(item[0]))
self.log.info('----------> Sequence Feature')
indexFeatures.append(item[0])
self.configDict['wordToNumCols'] = self.wordToNumericFeatures
self.configDict['emptyFtrs'] = indexFeatures
self.log.info('Status:- |... Feature inspection done for numeric data: '+str(ctn_count)+' feature(s) converted to numeric')
self.log.info('Status:- |... Feature word to numeric treatment done: '+str(self.wordToNumericFeatures))
self.log.info( '----------- Inspecting Features End -----------\\n')
except Exception as inst:
self.log.info("Error in Feature inspection: "+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
try:
self.log.info('\\n---------- Dropping Index features ----------')
self.log.info('Index Features to remove '+str(indexFeatures))
if len(indexFeatures) > 0:
dataCols = list(set(dataCols) - set(indexFeatures))
for empCol in indexFeatures:
self.log.info('-------> Drop Feature: '+empCol)
df = df.drop(columns=[empCol])
self.log.info('---------- Dropping Index features End----------\\n')
dataFDtypes=self.dataFramecolType(df)
categoricalMaxLabel = int(conf_json['categoryMaxLabel'])
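#segregation rules: numeric dtypes with at least categoryMaxLabel unique values are true numerics (ints -> discrete, floats -> continuous); low-cardinality numerics and short-text objects become categorical; high-cardinality or long-text object columns become text features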
for item in dataFDtypes:
self.DtypesDic[item[0]] = item[1]
nUnique=len(df[item[0]].unique().tolist())
if item[1] in self.pandasNumericDtypes:
self.allNumberTypeCols.append(item[0])
if nUnique >= categoricalMaxLabel:
self.allNumCols.append(item[0]) #pure numerical
if item[1] in ['int16', 'int32', 'int64']:
self.numDiscreteCols.append(item[0])
elif item[1] in ['float16', 'float32', 'float64']:
self.numContinuousCols.append(item[0])
else:
self.allCatCols.append(item[0])
elif item[1] != 'bool':
if (nUnique >= categoricalMaxLabel) and targetFeature != item[0]:
self.textFtrs.append(item[0])
else:
col = item[0]
if (max(df[col].astype(str).str.split().str.len()) > 10) and targetFeature != item[0]:
self.textFtrs.append(item[0])
else:
self.allCatCols.append(item[0])
else:
self.allCatCols.append(item[0])
misval_ratio = float(conf_json['misValueRatio'])
self.configDict['misval_ratio'] = misval_ratio
missingCols, emptyCols = self.getMissingVals(df, dataCols, misval_ratio)
if targetFeature in emptyCols:
raise Exception('Target column '+str(targetFeature)+' cannot be empty')
dataCols = list(set(dataCols) - set(emptyCols))
self.log.info('\\n---------- Dropping empty features ----------')
for empCol in emptyCols:
self.log.info('-------> Drop Feature: '+empCol)
df = df.drop(columns=[empCol])
self.log.info('---------- Dropping empty features End----------\\n')
self.log.info('Status:- |... Empty feature treatment done: '+str(len(emptyCols))+' empty feature(s) found')
self.log.info('-------> Data Frame Shape After Dropping (Rows,Columns): '+str(df.shape))
self.allNumCols = list(set(self.allNumCols) - set(emptyCols))
self.allCatCols = list(set(self.allCatCols) - set(emptyCols))
self.textFtrs = list(set(self.textFtrs) - set(emptyCols))
missingValFtrs = list(set(missingCols) - set(emptyCols))
self.log.info(str(len(missingValFtrs))+' feature(s) found with missing value(s)')
self.log.info('\\n-------> Numerical continuous columns :'+(str(self.numContinuousCols))[:500])
self.log.info('-------> Numerical discrete columns :'+(str(self.numDiscreteCols))[:500])
self.log.info('-------> Non numerical columns :'+(str(self.allCatCols))[:500])
self.log.info('-------> Text columns :'+(str(self.textFtrs))[:500])
except Exception as inst:
self.log.info("Error in segregating numerical and categorical columns: "+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
return df, missingValFtrs, emptyCols, dataCols, self.allNumCols, self.allCatCols, self.textFtrs
def createIncProfiler(self, df, conf_json, allNumCols, numFtrs, allCatCols, textFtrs, missingValFtrs):
self.incLabelMapping = None
catFtrs = allCatCols.copy()
#LabelEncoding
if self.targetColumn in allCatCols:
catFtrs.remove(self.targetColumn)
self.incLabelMapping = LabelEncoder()
df[self.targetColumn] = df[self.targetColumn].apply(str)
self.incLabelMapping.fit(df[self.targetColumn])
self.le_dict = dict(zip(self.incLabelMapping.classes_, self.incLabelMapping.transform(self.incLabelMapping.classes_)))
self.log.info('----------> Encoded Values of Target Labels: '+(str(self.le_dict))[:500])
#self.incFill --> {num_fill:SI/0.0/'drop', cat_fill:SI/0.0/'drop'}
#fill
self.incFill = {}
self.incCatEncoder = None
self.incScaler = None
self.incOutlierRem = None
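#config convention: each option block maps a method name to a 'True'/'False' string, and the first method flagged 'True' wins; a default is used when none is enabled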
num_fill_method = 'Mean'
for x in list(conf_json['numericalFillMethod'].keys()):
if conf_json['numericalFillMethod'][x] == 'True':
num_fill_method = x
break
if num_fill_method.lower() =='mean':
num_fill = [(col, stats.Mean()) for col in allNumCols]
self.incFill['num_fill'] = StatImputer(*num_fill)
elif num_fill_method.lower() =='min':
num_fill = [(col, stats.Min()) for col in allNumCols]
self.incFill['num_fill'] = StatImputer(*num_fill)
elif num_fill_method.lower() == 'max':
num_fill = [(col, stats.Max()) for col in allNumCols]
self.incFill['num_fill'] = StatImputer(*num_fill)
elif num_fill_method.lower() =='zero':
self.incFill['num_fill'] = 'zero'
elif num_fill_method.lower() =='drop':
self.incFill['num_fill'] = 'drop'
else:
num_fill = [(col, stats.Mean()) for col in allNumCols]
self.incFill['num_fill'] = StatImputer(*num_fill)
cat_fill_method = 'Mode'
for x in list(conf_json['categoricalFillMethod'].keys()):
if conf_json['categoricalFillMethod'][x] == 'True':
cat_fill_method = x
break
if cat_fill_method.lower() =='zero':
self.incFill['cat_fill'] = 'zero'
elif cat_fill_method.lower() == 'mode':
cat_fill = [(col, stats.Mode()) for col in allCatCols]
self.incFill['cat_fill'] = StatImputer(*cat_fill)
elif cat_fill_method.lower() =='drop':
self.incFill['cat_fill'] = 'drop'
#CatEncoding
for x in list(conf_json['categoryEncoding'].keys()):
if conf_json['categoryEncoding'][x] == 'True':
catEncoder = x
break
catEncHow = 'Mean'
for x in list(conf_json['targetEncodingParams']['how'].keys()):
if conf_json['targetEncodingParams']['how'][x] == 'True':
catEncHow = x
break
if self.targetColumn in catFtrs:
catFtrs.remove(self.targetColumn)
if len(catFtrs) > 0:
from river.feature_extraction import TargetAgg
if catEncHow.lower() == 'mean':
agg_stat = stats.Mean()
if catEncHow.lower() == 'bayesianmean' or catEncHow.lower() == 'bayesian mean':
agg_stat = stats.BayesianMean(prior=0.5, prior_weight=50)
self.incCatEncoder = TargetAgg(
by=catFtrs[0], how=agg_stat)
for col in catFtrs[1:]:
self.incCatEncoder += TargetAgg(
by=col, how=agg_stat)
self.incCatEncoder|= compose.Discard(*catFtrs)
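#target encoding with river: each categorical column is replaced by a running aggregate (mean or Bayesian mean) of the target grouped by that column; += stacks one TargetAgg per column into a TransformerUnion and Discard drops the raw categorical inputs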
#Scaling
normalization_status = 'False'
normalization_method = ""
if 'normalization' in conf_json:
nor_supported_methods = conf_json['normalization']
for k in nor_supported_methods.keys():
if conf_json['normalization'][k].lower() == 'true':
normalization_status='True'
normalization_method =k
break
if normalization_status.lower() == "true" and len(numFtrs) > 0:
from sklearn.preprocessing import MinMaxScaler, StandardScaler, MaxAbsScaler
if self.targetColumn in numFtrs:
numFtrs.remove(self.targetColumn)
if normalization_method.lower() =='standardscaler':
self.incScaler = StandardScaler()
elif normalization_method.lower() =='minmaxscaler' or normalization_method.lower() =='minmax':
self.incScaler = MinMaxScaler()
elif normalization_method.lower() =='maxabsscaler' or normalization_method.lower() =='maxabs':
self.incScaler = MaxAbsScaler()
else:
self.incScaler = None
#OutlierRemoval
outlier_status = 'False'
outlier_method = 'None'
for x in list(conf_json['outlierDetection'].keys()):
if conf_json['outlierDetection'][x] == 'True':
outlier_method = x
outlier_status = 'True'
break
if outlier_status.lower() == 'true' and len(numFtrs) > 0: #compare the string flag explicitly: bool('False') is True
outlierMethodNames = list(conf_json['outlierDetectionParams'].keys())
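#anomaly.QuantileFilter wraps the detector and flags a point as an outlier when its score exceeds the running q-quantile of the scores seen so far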
if outlier_method.lower() == 'oneclasssvm' or outlier_method.lower() == 'one class svm':
for x in outlierMethodNames:
if x[0].lower() == 'o':
key = x
break
params = conf_json['outlierDetectionParams'][key]
self.log.info('<--- one class SVM with quantile filter --->')
self.incOutlierRem = anomaly.QuantileFilter(anomaly.OneClassSVM(nu=float(params['nu'])),q=float(params['q']))
elif outlier_method.lower() =='halfspacetrees' or outlier_method.lower() =='half space trees':
for x in outlierMethodNames:
if x[0].lower() == 'h':
key = x
break
params = conf_json['outlierDetectionParams'][key]
self.log.info('<--- Half space trees with quantile filter --->')
self.incOutlierRem = anomaly.QuantileFilter(anomaly.HalfSpaceTrees(n_trees=int(params['n_trees']),height=int(params['height']), window_size=int(params['window_size'])) ,q=float(params['q']))
else:
self.log.info("No method is provided for outlier analysis")
def getMissingVals(self,dataframe,columns,misval_ratio):
try:
self.log.info( '\\n----------- Detecting for Missing Values -----------')
nonNAArray=[]
numOfRows = dataframe.shape[0]
for i in columns:
numNa=dataframe.loc[(pd.isna(dataframe[i])),i ].shape[0]
nonNAArray.append(tuple([i,numNa]))
for item in nonNAArray:
numofMissingVals = item[1]
if(numofMissingVals !=0):
self.log.info('-------> Feature '+str(item[0]))
self.log.info('----------> Number of Empty Rows '+str(numofMissingVals))
self.missingCols.append(item[0])
if(numofMissingVals >= numOfRows * misval_ratio):
self.log.info('----------> Empty: Yes')
self.log.info('----------> Permitted Rows: '+str(int(numOfRows * misval_ratio)))
self.emptyCols.append(item[0])
if(len(self.missingCols) !=0):
self.log.info( '----------- Detecting for Missing Values End -----------\\n')
return self.missingCols, self.emptyCols
else:
self.log.info( '-------> Missing Value Features :Not Any')
self.log.info( '----------- Detecting for Missing Values End -----------\\n')
return self.missingCols, self.emptyCols
except Exception as e:
self.log.info("getMissingVals failed ==>" +str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
return None, None
def startIncProfiler(self,df,conf_json,targetFeature,deployLocation,problemType):
try:
self.targetColumn = targetFeature
df, missingValFtrs, emptyFtrs, dataFtrs, allNumCols, allCatCols, textFtrs = self.featureAnalysis(df, conf_json, self.targetColumn)
if len(textFtrs)>0:
self.log.info('Text Features are not supported. Dropping '+str(textFtrs)[:500])
df = df.drop(columns=textFtrs)
catFtrs = allCatCols.copy()
numFtrs = allNumCols.copy()
if self.targetColumn in catFtrs:
catFtrs.remove(self.targetColumn)
if targetFeature in allNumCols:
numFtrs.remove(targetFeature)
self.configDict['targetCol'] = self.targetColumn
self.configDict['numFtrs'] = numFtrs
self.configDict['catFtrs'] = catFtrs
self.configDict['allNumCols'] = allNumCols
self.configDict['allCatCols'] = allCatCols
self.configDict['allFtrs'] = numFtrs+catFtrs
try:
self.log.info('\\n---------- Creating Incremental profiler models ----------')
self.createIncProfiler(df, conf_json, allNumCols, numFtrs, allCatCols, textFtrs, missingValFtrs)
self.log.info('\\n--------- Incremental profiler models have been created ---------')
except Exception as inst:
self.log.info("Error in creating Incremental profiler models"+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
raise
try:
#mvt
# if missingValFtrs:
if self.incFill['num_fill'] == 'drop':
df = df.dropna(axis = 0, subset=allNumCols)
self.configDict['num_fill'] = 'drop'
elif self.incFill['num_fill'] == 'zero':
df[allNumCols] = df[allNumCols].fillna(value = 0.0)
self.configDict['num_fill'] = 'zero'
else:
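#river imputers expect missing values as None rather than NaN, hence the astype(object)/where conversion before streaming rows through StatImputer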
df = df.astype(object).where(df.notna(), None)
df[allNumCols]= df[allNumCols].apply(lambda row: self.apply_river_model(row.to_dict(), self.incFill['num_fill']), axis='columns')
self.configDict['num_fill'] = {col:self.incFill['num_fill'].stats[col].get() for col in allNumCols}
if self.incFill['cat_fill'] == 'drop':
df = df.dropna(axis = 0, subset=allCatCols)
self.configDict['cat_fill'] = 'drop'
elif self.incFill['cat_fill'] == 'zero':
df[allCatCols] = df[allCatCols].fillna(value = 0.0)
self.configDict['cat_fill'] = 'zero'
else:
df = df.astype(object).where(df.notna(), None)
df[allCatCols]= df[allCatCols].apply(lambda row: self.apply_river_model(row.to_dict(), self.incFill['cat_fill']), axis='columns')
self.configDict['cat_fill'] = {col:self.incFill['cat_fill'].stats[col].get() for col in allCatCols}
self.log.info('\\nStatus:- |... Missing value treatment done')
except Exception as inst:
self.log.info("Error in Missing value treatment "+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
raise
try:
#labelenc
if self.incLabelMapping:
df[targetFeature] = self.incLabelMapping.transform(df[targetFeature])
# self.configDict['labelMapping'] = self.le_dict
except Exception as inst:
self.log.info("Error in Label mapping "+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
raise
try:
#catenc
if self.incCatEncoder:
self.log.info('\\n--------- Converting Non Numerical Categorical Features to Numerical Features ---------')
self.encTarget = targetFeature
if problemType.lower() == 'regression':
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
self.encTarget = 'scaledTarget'
df['scaledTarget'] = sc.fit_transform(df[targetFeature].to_numpy().reshape(-1,1))
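#for regression the target is standardized into a temporary 'scaledTarget' column so the running target aggregates are computed on a normalized scale; the helper column is dropped again after encoding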
encCols = catFtrs.copy()
encCols.append(self.encTarget)
self.configDict['encCols'] = encCols
self.configDict['encTarget'] = self.encTarget
transformed_data = df[encCols].apply(lambda row: self.apply_enc(row.to_dict()), axis='columns')
if targetFeature in transformed_data.columns:
transformed_data.drop(targetFeature, inplace=True, axis = 1)
if problemType.lower() == 'regression':
df.drop('scaledTarget', inplace=True, axis = 1)
df[catFtrs] = transformed_data
# self.log.info('Status:- |... Target Encoding state is as follows: ')
self.configDict['catEnc'] = []
if len(catFtrs) == 1:
col = catFtrs[0]
self.configDict['catEnc'].append({col:self.incCatEncoder['TargetAgg'].state.to_dict()})
else:
for i, col in enumerate(catFtrs):
if i==0:
no = ''
else:
no = str(i)
self.configDict['catEnc'].append({col:self.incCatEncoder['TransformerUnion']['TargetAgg'+no].state.to_dict()})
# print(self.incCatEncoder['TransformerUnion']['TargetAgg'].state)
# self.log.info(self.incCatEncoder)
self.log.info('Status:- |... Categorical to numeric feature conversion done: '+str(len(catFtrs))+' features converted')
except Exception as inst:
self.log.info("Error in categorical encoding "+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
raise
try:
#scaler
if self.incScaler:
self.log.info("\\n---------- Data Normalization has started ----------")
self.incScaler = self.incScaler.partial_fit(df[numFtrs])
df[numFtrs] = self.incScaler.transform(df[numFtrs])
self.log.info( "---------- Normalization Done on Following features ----------")
self.log.info(numFtrs)
self.log.info('Status:- |... Normalization treatment done')
except Exception as inst:
self.log.info("Error in normalization "+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
raise
try:
#outlierrem
if self.incOutlierRem:
self.log.info('\\n---------- Performing outlier analysis ----------')
df = df[df[numFtrs].apply(lambda x: False if self.apply_od_pipe(x.to_dict()) else True, axis=1)]
self.log.info('\\n <--- dataframe after outlier analysis --->')
df.reset_index(drop=True, inplace=True)
self.log.info(df.head(5))
self.log.info('Status:- |... Outlier treatment done')
self.log.info('\\n <--- shape of dataframe after outlier analysis --->')
self.log.info(df.shape)
except Exception as inst:
self.log.info("Error in outlier treatment "+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
raise
#saveprofiler
self.log.info('\\n---------- Saving profiler models ----------')
self.saveProfilerModels(deployLocation)
self.log.info('<--- Profiler models saved at '+deployLocation+' --->')
return df,targetFeature,missingValFtrs,numFtrs,catFtrs,self.le_dict,self.configDict,textFtrs,emptyFtrs,self.wordToNumericFeatures
except Exception as inst:
self.log.info("Error: dataProfiler failed "+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def transformData(self, df, targetFeature, missingValFtrs,numFtrs, catFtrs, textFtrs):
try:
df = df.drop_duplicates(keep="first")
df = df.reset_index(drop=True)
df = df.replace('-', np.nan)
df = df.replace('?', np.nan)
text_mv_cols = list(set(missingValFtrs).intersection(set(textFtrs)))
if len(text_mv_cols)>0:
df[text_mv_cols] = df[text_mv_cols].fillna(value = 'NA')
if 'num_fill' in self.configDict:
if self.configDict['num_fill'] == 'drop':
df = df.dropna(axis = 0, subset=self.allNumCols)
elif self.configDict['num_fill'] == 'zero':
df[self.allNumCols] = df[self.allNumCols].fillna(value = 0.0)
else:
for x in self.allNumCols:
df[x] = df[x].fillna(value = self.configDict['num_fill'][x])
if 'cat_fill' in self.configDict:
if self.configDict['cat_fill'] == 'drop':
df = df.dropna(axis = 0, subset=self.allCatCols)
elif self.configDict['cat_fill'] == 'zero':
df[self.allCatCols] = df[self.allCatCols].fillna(value = 0.0)
else:
for x in self.allCatCols:
df[x] = df[x].fillna(value = self.configDict['cat_fill'][x])
if self.incLabelMapping:
df[targetFeature] = self.incLabelMapping.transform(df[targetFeature])
if self.incCatEncoder:
transformed_data = df[catFtrs].apply(lambda row: self.apply_enc(row.to_dict(), isTrain=False), axis='columns')
df[catFtrs] = transformed_data
if self.incScaler:
df[numFtrs] = self.incScaler.transform(df[numFtrs])
return df
except Exception as inst:
self.log.info("Error: DataProfiling transformation failed "+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def checknumStr(self,dataframe,feature):
try:
dataframe[feature]=dataframe[feature].apply(lambda x: self.testStr(x))
return dataframe
except:
self.log.info("checknumStr failed")
return dataframe
#test whether the value is numeric /string
def testStr(self,value):
try:
x=eval(value)
return np.nan
except:
return value
"""
Missing values analysis
Detects number of missing values in each column of dataframe
"""
def checksRows(self,dataframe,target_column,dataColumns):
self.log.info( '\\n----------- Checking Target Feature Empty Rows -----------')
if self.targetColumn != '':
numNa=dataframe.loc[(pd.isna(dataframe[self.targetColumn])),self.targetColumn].shape[0]
self.log.info('------->No of Empty Rows in Target Fields: '+str(numNa))
if numNa >0:
self.log.info('-------> Remove Empty Target Field Rows')
dataframe = dataframe.dropna(axis=0, subset=[self.targetColumn])
self.log.info('-------> Remove All-Empty and Duplicate Rows')
dataframe = dataframe.dropna(axis=0,how='all',subset=dataColumns)
noOfDuplicateRows = dataframe.duplicated(keep='first').sum()
dataframe = dataframe.drop_duplicates(keep="first")
dataframe = dataframe.reset_index(drop=True)
return dataframe,noOfDuplicateRows
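#river helper callbacks, each applied to one row-dict at a time via DataFrame.apply:
#apply_river_model: learn-then-transform for the StatImputer, returning the imputed row
#apply_enc: learn (training only) then transform for the target encoder
#apply_od_pipe: score, classify, then learn for the outlier detector, returning the anomaly flag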
def apply_river_model(self, x, profModel):
profModel.learn_one(x)
return pd.Series(profModel.transform_one(x))
def apply_enc(self, x, isTrain=True):
if isTrain:
y = x[self.encTarget]
self.incCatEncoder.learn_one(x, y)
return pd.Series(self.incCatEncoder.transform_one(x))
def apply_od_pipe(self, x):
score = self.incOutlierRem.score_one(x)
is_anomaly = self.incOutlierRem.classify(score)
self.incOutlierRem.learn_one(x)
return is_anomaly
#Convert Words To Number
def s2n(self,value):
try:
x=eval(value)
return x
except:
try:
return w2n.word_to_num(value)
except:
return np.nan
def convertWordToNumeric(self,dataframe,feature):
try:
dataframe[feature]=dataframe[feature].apply(lambda x: self.s2n(x))
return dataframe
except Exception as inst:
self.log.info("convertWordToNumeric Failed ===>"+str(inst))
return dataframe
#test whether the value is numeric /string
def testNum(self,value):
try:
x=eval(value)
return x
except:
return np.nan
##check for numeric values in string column
def checkNumeric(self,dataframe,feature):
try:
dataframe[feature]=dataframe[feature].apply(lambda x: self.testNum(x))
return dataframe
except Exception as inst:
self.log.info("checkNumeric Failed ===>"+str(inst))
return dataframe
def smartFilter(self,feature,df,numericRatio):
try:
distinctCount = len(df[feature].unique())
numOfRows = df.shape[0]
tempDataFrame=df.copy(deep=True)
if(distinctCount != 1):
self.log.info('-------> Feature :'+str(feature))
testDf = self.checkNumeric(tempDataFrame,feature)
tempDf = testDf[feature]
tempDf = tempDf.dropna()
numberOfNonNullVals = tempDf.count()
if(numberOfNonNullVals > int(numOfRows * numericRatio)):
tempDataFrame=df.copy(deep=True)
testDf = self.convertWordToNumeric(tempDataFrame,feature)
tempDf = testDf[feature]
tempDf = tempDf.dropna()
self.log.info('----------> Numeric Status :Yes')
return testDf,True
else:
#Wasn't a numerical feature
self.log.info('----------> Numeric Status :No')
#numDf = self.checknumStr(df,feature)
return df,False
else:
self.log.info( '\\n---> No Numerics found in :' +str(feature))
return df,False
except:
self.log.info( '\\n---> No Numerics found in :'+str(feature))
return df,False
def dataFramecolType(self,dataFrame):
dataFDtypes=[]
try:
dataColumns=list(dataFrame.columns)
for i in dataColumns:
dataType=dataFrame[i].dtypes
dataFDtypes.append(tuple([i,str(dataType)]))
return dataFDtypes
except:
self.log.info("error in dataFramecolyType")
return dataFDtypes
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import warnings
warnings.filterwarnings('ignore')
import logging
import sklearn
from random import sample
from numpy.random import uniform
import numpy as np
import math
import pickle
import os
import json
from math import isnan
from sklearn.preprocessing import binarize
from sklearn.preprocessing import LabelEncoder
import pandas as pd
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from incremental.incClassificationModel import incClassifierModel
from incremental.incRegressionModel import incRegressionModel
class incMachineLearning(object):
def __init__(self,mlobj):
self.features=[]
self.mlobj=mlobj
self.log = logging.getLogger('eion')
def startLearning(self,mlconfig,modelType,modelParams,modelList,scoreParam,features,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,targetType,deployLocation,iterName,iterVersion,trained_data_file,predicted_data_file,labelMaps):
model = 'None'
params = 'None'
score = 0xFFFF
estimator = None
model_tried = ''
threshold = -1
pscore = -1
rscore = -1
topics = {}
if(targetColumn != ''):
targetData = dataFrame[targetColumn]
datacolumns=list(dataFrame.columns)
if targetColumn in datacolumns:
datacolumns.remove(targetColumn)
scoreParam = self.mlobj.setScoreParams(scoreParam,modelType,categoryCountList)
self.log.info('\\n-------------- Training ML: Start --------------')
model_type,model,params, score, estimator,model_tried,xtrain,ytrain,xtest,ytest,threshold,pscore,rscore,method,incObj=self.startLearnerModule(mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps)
self.log.info('-------------- Training ML: End --------------\\n')
filename = os.path.join(deployLocation,'production','model',model+'.pkl')
saved_model = model+'.pkl'
with open(filename, 'wb') as f:
pickle.dump(estimator, f)
df_test = xtest.copy()
df_test.reset_index(inplace = True,drop=True)
trainPredictedData = incObj.bestTrainPredictedData
predictedData = incObj.bestPredictedData
try:
if(model_type == 'Classification'):
self.log.info('\\n--------- Performance Matrix with Train Data ---------')
train_matrix = self.mlobj.getClassificationPerformaceMatrix(ytrain,trainPredictedData,labelMaps)
self.log.info('--------- Performance Matrix with Train Data End ---------\\n')
self.log.info('\\n--------- Performance Matrix with Test Data ---------')
performancematrix = self.mlobj.getClassificationPerformaceMatrix(ytest,predictedData,labelMaps)
ytest.reset_index(inplace=True,drop=True)
df_test['actual'] = ytest
df_test['predict'] = predictedData
self.log.info('--------- Performance Matrix with Test Data End ---------\\n')
matrix = performancematrix
elif(model_type == 'Regression'):
self.log.info('\\n--------- Performance Matrix with Train Data ---------')
train_matrix = self.mlobj.get_regression_matrix(ytrain, trainPredictedData)
self.log.info('--------- Performance Matrix with Train Data End ---------\\n')
self.log.info('\\n--------- Performance Matrix with Test Data ---------')
matrix = self.mlobj.get_regression_matrix(ytest, predictedData)
ytest.reset_index(inplace=True, drop=True)
df_test['actual'] = ytest
df_test['predict'] = predictedData
self.log.info('--------- Performance Matrix with Test Data End ---------\\n')
except Exception as Inst:
self.log.info('--------- Error Performance Matrix ---------\\n')
self.log.info(str(Inst))
df_test['predict'] = predictedData
matrix = ""
train_matrix = ""
self.log.info('--------- Performance Matrix with Test Data End ---------\\n')
df_test.to_csv(predicted_data_file)
return 'Success',model_type,model,saved_model,matrix,train_matrix,xtrain.shape,model_tried,score,filename,self.features,threshold,pscore,rscore,method,estimator,xtrain,ytrain,xtest,ytest,topics,params
def startLearnerModule(self,mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps):
matrix = ''
threshold = -1
pscore = -1
rscore = -1
datacolumns=list(xtrain.columns)
if targetColumn in datacolumns:
datacolumns.remove(targetColumn)
self.features =datacolumns
self.log.info('-------> Features Used For Training the Model: '+(str(self.features))[:500])
xtrain = xtrain[self.features]
xtest = xtest[self.features]
method = mlconfig['optimizationMethod']
method = method.lower()
geneticParam = ''
optimizationHyperParameter = mlconfig['optimizationHyperParameter']
cvSplit = optimizationHyperParameter['trainTestCVSplit']
nIter = int(optimizationHyperParameter['iterations'])
if(method.lower() == 'genetic'):
geneticParam = optimizationHyperParameter['geneticparams']
scoreParam = scoreParam
if 'thresholdTunning' in mlconfig:
thresholdTunning = mlconfig['thresholdTunning']
else:
thresholdTunning = 'NA'
if cvSplit == "":
cvSplit =None
else:
cvSplit =int(cvSplit)
if modelType == 'classification':
model_type = "Classification"
MakeFP0 = False
MakeFN0 = False
if(len(categoryCountList) == 2):
if(thresholdTunning.lower() == 'fp0'):
MakeFP0 = True
elif(thresholdTunning.lower() == 'fn0'):
MakeFN0 = True
noOfClasses= len(labelMaps)
incObjClf = incClassifierModel(noOfClasses,modelList, modelParams, scoreParam, cvSplit, nIter,geneticParam, xtrain,ytrain,xtest,ytest,method,modelType,MakeFP0,MakeFN0,deployLocation)
model, params, score, estimator,model_tried,threshold,pscore,rscore = incObjClf.firstFit()
incObj = incObjClf
elif modelType == 'regression':
model_type = "Regression"
incObjReg = incRegressionModel(modelList, modelParams, scoreParam, cvSplit, nIter,geneticParam, xtrain,ytrain,xtest,ytest,method,deployLocation)
model,params,score,estimator,model_tried = incObjReg.firstFit()
incObj = incObjReg
return model_type,model,params, score, estimator,model_tried,xtrain,ytrain,xtest,ytest,threshold,pscore,rscore,method, incObj<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from learner.optimizetechnique import OptimizationTq
from learner.parameters import parametersDefine
import time
import logging
import os
import sys
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error,make_scorer
from sklearn.metrics import mean_squared_error
from learner.aion_matrix import aion_matrix
class incRegressionModel():
def __init__(self,modelList,params,scoreParam,cvSplit,numIter,geneticParam,trainX,trainY,testX,testY,method,deployLocation):
self.modelList =modelList
self.params =params
self.trainX =trainX
self.trainY =trainY
self.testX = testX
self.testY = testY
self.method =method
self.scoreParam=scoreParam
self.cvSplit=cvSplit
self.numIter=numIter
self.geneticParam=geneticParam
self.log = logging.getLogger('eion')
self.deployLocation = deployLocation
self.bestTrainPredictedData = None
self.bestPredictedData = None
self.AlgorithmNames={'Online Linear Regression':'Online Linear Regression', 'Online Decision Tree Regressor':'Online Decision Tree Regressor', 'Online KNN Regressor':'Online KNN Regressor'}
self.modelToAlgoNames = {value: key for key, value in self.AlgorithmNames.items()}
def firstFit(self):
bestModel=''
bestParams={}
import sys
bestScore=-sys.float_info.max #bugfix 11656
scoredetails = ''
self.log.info('\\n---------- Regression Model has started ----------')
try:
for modelName in self.modelList:
if modelName not in self.params:
continue
paramSpace=self.params[modelName]
algoName = self.AlgorithmNames[modelName]
from incremental.riverML import riverML
riverMLObj = riverML()
self.log.info("-------> Model Name: "+str(modelName))
start = time.time()
model, modelParams, estimator, trainPredictedData = riverMLObj.startLearn('regression',algoName,paramSpace,self.trainX, self.trainY)
modelParams = str(modelParams)
executionTime=time.time() - start
self.log.info('---------> Total Execution: '+str(executionTime))
predictedData = riverMLObj.getPrediction(estimator,self.testX)
if 'neg_mean_squared_error' in self.scoreParam:
meansquarederror = mean_squared_error(self.testY,predictedData)
score = meansquarederror
elif 'neg_root_mean_squared_error' in self.scoreParam:
rootmeansquarederror=mean_squared_error(self.testY,predictedData,squared=False)
score = rootmeansquarederror
elif 'neg_mean_absolute_error' in self.scoreParam:
meanabsoluteerror=mean_absolute_error(self.testY,predictedData)
score = meanabsoluteerror
elif 'r2' in self.scoreParam:
r2score=r2_score(self.testY,predictedData)
score = round(r2score*100, 2)
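#selection direction: r2 is higher-is-better; the error metrics are lower-is-better and are compared via abs() below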
if self.scoreParam == "r2":
if score>bestScore:
bestScore =score
|
bestModel =model
bestParams=modelParams
bestEstimator=estimator
self.bestTrainPredictedData = trainPredictedData
self.bestPredictedData = predictedData
else:
if abs(score) < bestScore or bestScore == -sys.float_info.max:
bestScore =abs(score)
bestModel =model
bestParams=modelParams
bestEstimator=estimator
self.bestTrainPredictedData = trainPredictedData
self.bestPredictedData = predictedData
metrices = {}
metrices["score"] = score
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","Score":'+str(abs(score))+'}'
self.log.info('Status:- |... ML Algorithm applied: '+modelName)
self.log.info("Status:- |... Testing Score: "+str(score))
self.log.info('---------- Regression Model End ---------- \\n')
self.log.info('\\n------- Best Model and its parameters -------------')
self.log.info('Status:- |... Best Algorithm selected: '+str(self.modelToAlgoNames[bestModel])+' Score='+str(round(bestScore,2)))
self.log.info("-------> Best Name: "+str(bestModel))
self.log.info("-------> Best Score: "+str(bestScore))
return self.modelToAlgoNames[bestModel],bestParams,bestScore,bestEstimator,scoredetails
except Exception as inst:
self.log.info( '\\n-----> regressionModel failed!!!.'+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))<s>
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import time
import os
import sys
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.preprocessing import binarize
from learner.optimizetechnique import OptimizationTq
from learner.parameters import parametersDefine
import logging
from learner.aion_matrix import aion_matrix
# apply threshold to positive probabilities to create labels
def to_labels(pos_probs, threshold):
return (pos_probs >= threshold).astype('int')
class incClassifierModel():
def __init__(self,noOfClasses,modelList,params,scoreParam,cvSplit,numIter,geneticParam,trainX,trainY,testX,testY,method,modelType,MakeFP0,MakeFN0,deployLocation):
self.noOfClasses = noOfClasses
self.modelList =modelList
self.params =params
self.trainX =trainX
self.X =trainX
self.trainY =trainY
self.testX = testX
self.testY = testY
self.method =method
self.scoreParam=scoreParam
self.cvSplit=cvSplit
self.numIter=numIter
self.geneticParam=geneticParam
self.MakeFP0= MakeFP0
self.MakeFN0=MakeFN0
self.log = logging.getLogger('eion')
self.modelType = modelType
self.deployLocation = deployLocation
self.isRiverModel = False
self.AlgorithmNames={'Online Logistic Regression':'Online Logistic Regression', 'Online Softmax Regression':'Online Softmax Regression', 'Online Decision Tree Classifier':'Online Decision Tree Classifier', 'Online KNN Classifier':'Online KNN Classifier'}
self.modelToAlgoNames = {value: key for key, value in self.AlgorithmNames.items()}
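#check_threshold scans candidate probability thresholds and returns the first one that drives FP (checkParameter 'fp') or FN ('fn') to zero with perfect precision/recall; -1 means no qualifying threshold was found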
def check_threshold(self,estimator,testX,testY,threshold_range,checkParameter,modelName):
thresholdx = -1
for threshold in threshold_range:
predictedData = estimator.predict_proba(testX)
predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold)#bug 12437
p_score = precision_score(testY, predictedData)
r_score = recall_score(testY, predictedData)
tn, fp, fn, tp = confusion_matrix(testY, predictedData).ravel()
if(checkParameter.lower() == 'fp'):
if fp == 0:
if(p_score == 1):
thresholdx = threshold
self.log.info('---------------> Best Threshold:'+str(threshold))
self.log.info('---------------> Best Precision:'+str(p_score))
self.log.info('---------------> Best Recall:'+str(r_score))
self.log.info('---------------> TN:'+str(tn))
self.log.info('---------------> FP:'+str(fp))
self.log.info('---------------> FN:'+str(fn))
self.log.info('---------------> TP:'+str(tp))
break
if(checkParameter.lower() == 'fn'):
if fn == 0:
if(r_score == 1):
thresholdx = threshold
self.log.info('---------------> Best Threshold:'+str(threshold))
self.log.info('---------------> Best Precision:'+str(p_score))
self.log.info('---------------> Best Recall:'+str(r_score))
self.log.info('---------------> TN:'+str(tn))
self.log.info('---------------> FP:'+str(fp))
self.log.info('---------------> FN:'+str(fn))
self.log.info('---------------> TP:'+str(tp))
break
return(thresholdx,p_score,r_score)
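#getBestModel prefers models that found a qualifying threshold; in FP0 mode ties are broken by recall, in FN0 mode by precision, and otherwise by the raw test score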
def getBestModel(self,fp0,fn0,threshold,bestthreshold,rscore,brscore,pscore,bpscore,tscore,btscore):
cmodel = False
if(threshold != -1):
if(bestthreshold == -1):
cmodel = True
bestthreshold = threshold
brscore = rscore
bpscore = pscore
btscore = tscore
elif fp0:
if rscore > brscore:
cmodel = True
bestthreshold = threshold
brscore = rscore
bpscore = pscore
btscore = tscore
elif rscore == brscore:
if tscore > btscore or btscore == -0xFFFF:
cmodel = True
bestthreshold = threshold
brscore = rscore
bpscore = pscore
btscore = tscore
elif fn0:
if pscore > bpscore:
cmodel = True
bestthreshold = threshold
brscore = rscore
bpscore = pscore
btscore = tscore
elif pscore == bpscore:
if tscore > btscore or btscore == -0xFFFF:
cmodel = True
bestthreshold = threshold
brscore = rscore
bpscore = pscore
btscore = tscore
else:
if tscore > btscore or btscore == -0xFFFF:
cmodel = True
btscore = tscore
else:
if(bestthreshold == -1):
if tscore > btscore or btscore == -0xFFFF:
cmodel = True
btscore = tscore
return cmodel,btscore,bestthreshold,brscore,bpscore
def firstFit(self):
bestModel='None'
bestParams={}
bestScore=-0xFFFF
bestEstimator = 'None'
scoredetails = ''
threshold = -1
bestthreshold = -1
precisionscore =-1
bestprecisionscore=-1
recallscore = -1
bestrecallscore=-1
self.bestTrainPredictedData = None
self.bestPredictedData = None
self.log.info('\\n---------- ClassifierModel has started ----------')
objClf = aion_matrix()
try:
for modelName in self.modelList:
paramSpace=self.params[modelName]
algoName = self.AlgorithmNames[modelName]
from incremental.riverML import riverML
riverMLObj = riverML()
self.log.info("-------> Model Name: "+str(modelName))
start = time.time()
model, modelParams, estimator, trainPredictedData = riverMLObj.startLearn('classification',algoName,paramSpace,self.trainX, self.trainY, self.noOfClasses)
modelParams = str(modelParams)
predictedData = riverMLObj.getPrediction(estimator,self.testX)
executionTime=time.time() - start
self.testY.reset_index(inplace=True, drop=True)
score = objClf.get_score(self.scoreParam,self.testY.values.flatten(),predictedData.values.flatten())
self.log.info(str(score))
metrices = {}
metrices["score"] = score
threshold = -1
precisionscore = precision_score(self.testY, predictedData, average='macro')
recallscore = recall_score(self.testY, predictedData, average='macro')
self.log.info('---------> Total Execution: '+str(executionTime))
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","Score":'+str(score)+'}'
status,bscore,bthres,brscore,bpscore = self.getBestModel(self.MakeFP0,self.MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,score,bestScore)
if status:
bestScore =bscore
bestModel =model
bestParams=modelParams
bestEstimator=estimator
bestthreshold = threshold
bestrecallscore = recallscore
bestprecisionscore = precisionscore
self.bestTrainPredictedData = trainPredictedData
self.bestPredictedData = predictedData
self.log.info('Status:- |... ML Algorithm applied: '+modelName)
self.log.info("Status:- |... Testing Score: "+str(score))
self.log.info('---------- ClassifierModel End ---------- \\n')
self.log.info('\\n------- Best Model and its parameters -------------')
self.log.info('Status:- |... Best Algorithm selected: '+str(self.modelToAlgoNames[bestModel])+' Score='+str(round(bestScore,2)))
self.log.info("-------> Best Name: "+str(bestModel))
self.log.info("-------> Best Score: "+str(bestScore))
return self.modelToAlgoNames[bestModel],bestParams,bestScore,bestEstimator,scoredetails,bestthreshold,bestprecisionscore,bestrecallscore
except Exception as inst:
self.log.info( '\\n-----> ClassifierModel failed!!!.'+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
<s>
import logging
import pickle
import os
import sys
import pandas as pd
from river import stream
from river.linear_model import LogisticRegression, SoftmaxRegression, LinearRegression
from river.tree import ExtremelyFastDecisionTreeClassifier, HoeffdingAdaptiveTreeRegressor
# from river.ensemble import AdaptiveRandomForestRegressor, AdaptiveRandomForestClassifier
from river.neighbors import KNNClassifier, KNNRegressor
from river.multiclass import OneVsRestClassifier
from river.optim import SGD, Adam, AdaDelta, NesterovMomentum, RMSProp
# from river.optim.losses import CrossEntropy, Log, MultiClassLoss, Poisson, RegressionLoss, BinaryLoss, Huber
# from river.optim.initializers import Normal
class riverML(object):
def __init__(self):
self.algoDict={'Online Logistic Regression':LogisticRegression, 'Online Softmax Regression':SoftmaxRegression, 'Online Decision Tree Classifier':ExtremelyFastDecisionTreeClassifier, 'Online KNN Classifier':KNNClassifier, 'Online Linear Regression':LinearRegression, 'Online Decision Tree Regressor':HoeffdingAdaptiveTreeRegressor, 'Online KNN Regressor':KNNRegressor}
self.optDict={'sgd': SGD, 'adam':Adam, 'adadelta':AdaDelta, 'nesterovmomentum':NesterovMomentum, 'rmsprop':RMSProp}
self.log = logging.getLogger('eion')
def getPrediction(self, model,X):
testStream = stream.iter_pandas(X)
preds = []
for (xi,yi) in testStream:
pred = model.predict_one(xi)
preds.append(pred)
return pd.DataFrame(preds)
def startLearn(self,problemType,algoName,params,xtrain,ytrain,noOfClasses=None):
try:
model = self.algoDict[algoName]
params = self.parseParams(params, algoName)
if problemType == 'classification':
if noOfClasses>2:
model = OneVsRestClassifier(classifier=model(**params))
else:
model = model(**params)
else:
model = model(**params)
trainStream = stream.iter_pandas(xtrain, ytrain)
#head start: prime the model on the first ~100 rows so the prequential pass below does not begin with a completely cold model
for i, (xi, yi) in enumerate(trainStream):
if i>100:
break
if yi is not None:
model.learn_one(xi, yi)
trainPredictedData = []
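#second pass: prequential (predict-then-learn) evaluation, so each training prediction is made before the model sees that row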
trainStream = stream.iter_pandas(xtrain, ytrain)
for i, (xi, yi) in enumerate(trainStream):
if yi is not None:
trainPredictedData.append(model.predict_one(xi))
model.learn_one(xi, yi)
trainPredictedData = pd.DataFrame(trainPredictedData)
return algoName, params, model, trainPredictedData
except Exception as inst:
self.log.info( '\\n-----> '+algoName+' failed!!!.'+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def parseParams(self, params, algoName):
try:
from learner.parameters import parametersDefine
paramsObj = parametersDefine()
paramDict =paramsObj.paramDefine(params,method=None)
paramDict = {k:v[0] for k,v in paramDict.items()}
if algoName=='Online Logistic Regression' or algoName=='Online Softmax Regression' or algoName=='Online Linear Regression':
opt = self.optDict[paramDict.pop('optimizer').lower()]
lr = float(paramDict.pop('optimizer_lr'))
paramDict['optimizer'] = opt(lr)
return paramDict
except Exception as inst:
self.log.info( '\\n-----> Parameter parsing failed!!!.'+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
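# --- Usage sketch (illustrative addition, not part of the original module) ---
# A minimal, hedged example of the prequential train/predict loop that riverML
# wraps: predict on each incoming row first, then learn from it. The toy
# feature and target values below are made up; only the river calls are real.
if __name__ == '__main__':
    import pandas as pd
    from river import stream
    from river.linear_model import LogisticRegression

    X = pd.DataFrame({'f1': [0.1, 0.9, 0.2, 0.8], 'f2': [1.0, 0.0, 1.0, 0.0]})
    y = pd.Series([0, 1, 0, 1], name='target')
    model = LogisticRegression()
    preds = []
    for xi, yi in stream.iter_pandas(X, y):
        preds.append(model.predict_one(xi))  # predict before learning (prequential evaluation)
        model.learn_one(xi, yi)
    print(preds)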
<s> import json
import sys,os
from pathlib import Path, PurePosixPath
from fabric import Connection
import tarfile
import copy
from hyperscalers.cloudServer import awsGPUTraining
import time
import shutil
import logging
import multiprocessing
from hyperscalers.mergeLogs import mergeLogs
class AION(awsGPUTraining):
def __init__(self, config):
config['AMAZON_EC2']['InstanceIds'] = [] #removing the support for Instance Id
super().__init__(config)
self.remoteUpload = {}
def copyDataOnServer(self, config): # parameter mirrors the caller's argument but is unused here
try:
client = Connection(
host=self.serverIP,
user=self.sshConfig["userName"],
connect_kwargs={
"key_filename": self.sshConfig["keyFilePath"],
},
)
client.run( 'mkdir -p {}'.format(self.remoteUpload['remoteDeployLocation']))
client.put(self.remoteUpload['configFile'], self.remoteUpload['remoteConfigLoc'])
if not Path(self.remoteUpload['dataLoc']).exists():
raise ValueError(" data location {} does not exist".format(self.remoteUpload['dataLoc']))
if Path(self.remoteUpload['dataLoc']).is_file():
client.put(self.remoteUpload['dataLoc'], self.remoteUpload['remoteDataLoc'])
else:
client.run( 'mkdir -p {}'.format(self.remoteUpload['remoteDataLoc']))
p = Path(self.remoteUpload['dataLoc']).glob('**/*')
files = [x for x in p if x.is_file()]
for file in files:
client.put(file, self.remoteUpload['remoteDataLoc'])
if self.remoteUpload.get('imgCsvLoc', None):
client.put(self.remoteUpload['imgCsvLoc'], self.remoteUpload['remoteDataLoc'])
except Exception as e:
raise ValueError("Error in copying data to cloud server. " + str(e))
def executeCode(self):
try:
client = Connection(
host=self.serverIP,
user=self.sshConfig["userName"],
connect_kwargs={
"key_filename": self.sshConfig["keyFilePath"],
},
)
cmd = '{} {} {}'.format("/home/ubuntu/aws/venv/aion-env/bin/python3.8", "/home/ubuntu/aws/venv/aion-env/lib/python3.8/site-packages/AION/aion.py", self.remoteUpload['remoteConfigLoc'])
output = client.run( cmd, warn=True)
except Exception as e:
raise ValueError("Error in running code on cloud server. " + str(e))
def downloadAndExtractModel(self):
try:
client = Connection(
host=self.serverIP,
user=self.sshConfig["userName"],
connect_kwargs={
"key_filename": self.sshConfig["keyFilePath"],
},
)
remote = PurePosixPath(self.remoteUpload['remoteDeployLocation'])
fileName = self.remoteUpload['deployName']
local = Path(self.remoteUpload['localDeployLocation'])
tarFileName = fileName+".tar.gz"
cmd = 'cd {};tar -czvf {} -C {}/ {}'.format(remote, tarFileName, remote, fileName)
client.run( cmd)
extractFile = str(local/tarFileName)
client.get( str(remote/tarFileName), extractFile)
with tarfile.open(extractFile, "r:gz") as tar:
tar.extractall(local)
Path(extractFile).unlink()
client.run( 'rm -r {}'.format(remote/fileName))
client.run( 'rm {}'.format(remote/tarFileName))
except Exception as e:
raise ValueError("Error in downloading file from server. " + str(e))
def deleteDataOnServer(self):
client = Connection(
host=self.serverIP,
user=self.sshConfig["userName"],
connect_kwargs={
"key_filename": self.sshConfig["keyFilePath"],
},
)
dataPaths = [self.remoteUpload['remoteDataLoc'], self.remoteUpload['remoteDeployLocation'], self.remoteUpload['remoteConfigLoc']]
for loc in dataPaths:
if Path(loc).is_file():
client.run( 'rm {}'.format(loc))
else:
client.run( 'rm -r {}'.format(loc))
# only for csv files
def updateConfigGetRemoteLoc(self, config, index=0):
remote_location = '/home/ubuntu/aws/usecase'
remoteInputLoc = PurePosixPath(remote_location)/"input"
remoteOutputLoc = PurePosixPath(remote_location)/"target"
if Path(config['basic']['dataLocation']).is_dir():
if Path(config['basic']['folderSettings']['labelDataFile']).parent !=Path(config['basic']['dataLocation']):
self.remoteUpload['imgCsvLoc'] = config['basic']['folderSettings']['labelDataFile']
config['basic']['folderSettings']['labelDataFile'] = Path(config['basic']['folderSettings']['labelDataFile']).name
csvFile = Path(config['basic']['dataLocation']).name
localFile = config['basic']['dataLocation']
localDeployLoc = config['basic']['deployLocation']
config['basic']['dataLocation'] = str(remoteInputLoc/csvFile)
config['basic']['deployLocation'] = str(remoteOutputLoc)
jsonFile = Path(__file__).parent/'remote_{}.json'.format(index)
with open(jsonFile,"w") as f:
json.dump(config, f)
self.remoteUpload['remoteDataLoc'] = config['basic']['dataLocation']
self.remoteUpload['remoteConfigLoc'] = str(remoteInputLoc)+ "/temp.json"
self.remoteUpload['remoteDeployLocation'] = config['basic']['deployLocation']
self.remoteUpload['dataLoc'] = localFile
self.remoteUpload['configFile'] = str(jsonFile)
self.remoteUpload['localDeployLocation'] = localDeployLoc
self.remoteUpload['deployName'] = "{}_{}".format(config['basic']['modelName'],config['basic']['modelVersion'])
def updateDeployPath(self):
import fileinput
self.remoteUpload['localDeployLocation'] = self.remoteUpload['localDeployLocation'].replace('\\\\','/')
remoteLoc = self.remoteUpload['remoteDeployLocation'] +'/'+ self.remoteUpload['deployName']
localLoc = self.remoteUpload['localDeployLocation'] +'/'+ "_".join(self.remoteUpload['deployName'].split('_')[:-1])
# rewrite remote paths to local paths in each artifact copied back from the server
for name in ("model_training_logs.log", "output.json", "display.json"):
    logFile = Path(self.remoteUpload['localDeployLocation'])/self.remoteUpload['deployName']/name
    if Path(logFile).exists():
        with fileinput.FileInput(logFile, inplace=True, backup='.bak') as file:
            for line in file:
                print(line.replace(remoteLoc, localLoc), end='')
def updateUserServerConfig(aws_config):
aws_config['ssh']['keyFilePath'] = str(Path(__file__).parent/"AION_GPU.pem")
return aws_config
def getKeyByValue(dictionary, refValue):
for key, value in dictionary.items():
if value == refValue:
return key
return None
def getKeysByValue(dictionary, refValue):
keys = []
for key, value in dictionary.items():
if value == refValue:
keys.append(key)
return keys
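# A small, illustrative self-check for the two lookup helpers above; the flags
# dict is a made-up stand-in for the "algorithms enabled" maps used elsewhere
# in this file.
def _demo_key_lookup():
    flags = {'classification': 'True', 'regression': 'False', 'timeSeries': 'True'}
    assert getKeyByValue(flags, 'True') == 'classification'  # first match, in insertion order
    assert getKeysByValue(flags, 'True') == ['classification', 'timeSeries']  # all matches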
class openInstancesStatus():
def __init__(self):
pass
def addInstance(self, instanceId, args=None):
fileName = instanceId + '.ec2instance'
data = {}
data[instanceId] = args
with open(fileName, "w") as f:
json.dump( data, f, indent=4) #TODO do we need to encrypt
def removeInstance(self, instanceId):
fileName = instanceId + '.ec2instance'
if Path(fileName).exists():
Path(fileName).unlink()
def clearPreviousInstancesState(self):
# check and stop the previous instance
openInstances = Path().glob("*.ec2instance")
for file in openInstances:
with open(file, 'r') as f:
data = json.load(f)
prevConfig = list(data.values())[0]
key = Path(file).stem
if prevConfig['AMAZON_EC2']['amiId']:
prevConfig['AMAZON_EC2']['InstanceIds'] = [key]
prevConfig['AMAZON_EC2']['amiId'] = "" # clear amiId
instance = awsGPUTraining(prevConfig)
if len(prevConfig['AMAZON_EC2']['InstanceIds']) > 0:
try:
if instance.is_instance_running(prevConfig['AMAZON_EC2']['InstanceIds'][0]):
instance.stop_server_instance()
except:
pass
self.removeInstance(key)
class prepareConfig():
def __init__(self, config,noOfInstance,ComputeInfrastructure):
if isinstance(config, dict):
self.config = config
self.configDir = Path(__file__).parent
elif isinstance(config, str):
with open(config, 'r') as f:
self.config = json.load(f)
self.configDir = Path(config).parent
else:
raise TypeError("{} type object is not supported for config".format(type(config)))
self.problemType = getKeyByValue(self.config['basic']['analysisType'] ,"True")
self.algorithms = getKeysByValue(self.config['basic']['algorithms'][self.problemType] ,"True")
self.numInstances = int(noOfInstance)
self.computeInfrastructure = ComputeInfrastructure
self.isMultiInstance = False
self.validateMultiInstance()
self.newConfigs = []
def isRemoteTraining(self):
return self.computeInfrastructure == "True"
def validateMultiInstance(self):
if self.isRemoteTraining():
if self.problemType == 'classification' or self.problemType == 'regression':
if self.numInstances > len(self.algorithms):
self.numInstances = len(self.algorithms)
if len(self.algorithms) > 1 and self.numInstances > 1:
self.isMultiInstance = True
def createNewConfigs(self):
configs = []
algos = self.algorithms
if len(algos) <= self.numInstances:
self.numInstances = len(algos)
algosPerInstances = (len(algos)+(self.numInstances - 1))//self.numInstances
remainingAlgos = len(algos)
for i in range(self.numInstances):
newConfig = copy.deepcopy(self.config)
for k,v in newConfig['basic']['algorithms'][self.problemType].items():
newConfig['basic']['algorithms'][self.problemType][k] = "False"
algosPerInstances = remainingAlgos // (self.numInstances - i)
for j in range(algosPerInstances):
newConfig['basic']['algorithms'][self.problemType][algos[len(algos) - remainingAlgos + j]] = "True"
newConfig['basic']['modelVersion'] = newConfig['basic']['modelVersion'] + "_{}".format(i)
newFileName = str(self.configDir/"splittedConfig_{}.json".format(i))
with open(newFileName, 'w') as jsonFile:
json.dump(newConfig, jsonFile, indent=4)
configs.append(newFileName)
remainingAlgos -= algosPerInstances
return configs
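# A standalone, illustrative sketch of the distribution rule used by
# createNewConfigs above: the remaining algorithms are divided as evenly as
# possible over the remaining instances. The algorithm names are hypothetical.
def _demo_algo_split():
    algos = ['LogisticRegression', 'RandomForest', 'XGBoost', 'SVM', 'KNN']
    numInstances = 2
    remaining = len(algos)
    batches = []
    for i in range(numInstances):
        perInstance = remaining // (numInstances - i)
        start = len(algos) - remaining
        batches.append(algos[start:start + perInstance])
        remaining -= perInstance
    return batches  # [['LogisticRegression', 'RandomForest'], ['XGBoost', 'SVM', 'KNN']]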
class Process(multiprocessing.Process):
def __init__(self, aws_config, configFile, index, openInstancesLog):
super(Process, self).__init__()
self.index = index
self.aws_config = aws_config
self.configFile = configFile
self.openInstances = openInstancesLog
def run(self):
log = logging.getLogger('eion')
serverStart = False
try:
server = AION(self.aws_config)
with open(self.configFile,"r") as f:
config = json.load(f)
server.updateConfigGetRemoteLoc(config, self.index)
instanceId = server.start_instance()
log.info('Status:-|... start instance: {}'.format(instanceId))
serverStart = True
self.openInstances.addInstance(instanceId, self.aws_config)
time.sleep(40)
log.info('Status:-|... copying data on instance: {}'.format(instanceId))
server.copyDataOnServer( config)
log.info('Status:-|... Training on instance: {}'.format(instanceId))
server.executeCode()
log.info('Status:-|... downloading data from instance: {}'.format(instanceId))
server.downloadAndExtractModel()
server.deleteDataOnServer()
log.info('Status:-|... stopping instance: {}'.format(instanceId))
server.stop_server_instance()
serverStart = False
self.openInstances.removeInstance(instanceId)
server.updateDeployPath()
except Exception as e:
print(e)
pass
finally:
if serverStart:
log.info('Status:-|... stopping instance: {}'.format(instanceId))
server.stop_server_instance()
self.openInstances.removeInstance(instanceId)
def awsTraining(configPath):
try:
# This function is responsible for starting the training on AWS.
with open(configPath, "r") as f:
config = json.load(f)
deployFolder = config['basic']['deployLocation']
iterName = config['basic']['modelName']
iterVersion = config['basic']['modelVersion']
dataLocation = config['basic']['dataLocation']
usecaseLocation = os.path.join(deployFolder,iterName)
if not Path(usecaseLocation).exists():
os.makedirs(usecaseLocation)
deployLocation = os.path.join(usecaseLocation,iterVersion)
if Path(deployLocation).exists():
shutil.rmtree(deployLocation)
os.makedirs(deployLocation)
logLocation = os.path.join(deployLocation,'log')
if not Path(logLocation).exists():
os.makedirs(logLocation)
#read the server config
logFileName=os.path.join(logLocation,'model_training_logs.log')
filehandler = logging.FileHandler(logFileName, 'w','utf-8')
formatter = logging.Formatter('%(message)s')
filehandler.setFormatter(formatter)
log = logging.getLogger('eion')
log.propagate = False
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
log.removeHandler(hdlr)
log.addHandler(filehandler)
log.setLevel(logging.INFO)
log.info('Status:-|... Compute Infrastructure:AMAZON EC2')
with open(Path(__file__).parent/"../config/compute.conf", "r") as f:
aws_config = json.load(f)
aws_config = updateUserServerConfig(aws_config)
configSplitter = prepareConfig(configPath,aws_config['AMAZON_EC2']['NoOfInstance'],aws_config['ComputeInfrastructure'])
newConfigs = configSplitter.createNewConfigs()
print(newConfigs)
openInstances = openInstancesStatus()
openInstances.clearPreviousInstancesState()
folders = []
processes = [0] * len(newConfigs)
for index, config in enumerate(newConfigs):
processes[index] = Process(aws_config, config, index, openInstances)
processes[index].start()
for index, config in enumerate(newConfigs):
processes[index].join()
folders.append(deployLocation + '_{}'.format(index))
if Path(deployLocation+'_0').exists():
filehandler.close()
log.removeHandler(filehandler)
merge = mergeLogs(folders)
merge.mergeFolder()
else:
output = {"status":"FAIL","message":"Please check cloud server configuration."}
output = json.dumps(output)
log.info('server code execution failed !....')
log.info('\\n------------- Output JSON ------------')
log.info('-------> Output :'+str(output))
log.info('------------- Output JSON ------------\\n')
print("\\n")
print("aion_learner_status:",output)
print("\\n")
except Exception as inst:
output = {"status":"FAIL","message":str(inst).strip('"')}
output = json.dumps(output)
log.info('server code execution failed !....'+str(inst))
log.info('\\n------------- Output JSON ------------')
log.info('-------> Output :'+str(output))
log.info('------------- Output JSON ------------\\n')
print("\\n")
print("aion_learner_status:",output)
print("\\n")
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import boto3
import select
import json
import time
import requests
import datetime
import uuid
import shutil
from websocket import create_connection
from botocore.exceptions import ClientError
import tarfile
from pathlib import Path, PurePosixPath
from stat import S_ISDIR
from fabric import Connection
import logging
class awsGPUTraining():
def __init__(self, config):
local_config = {"location":{"data":"aion/data/od", "code":"", "pretrainedModel":"aion/pretrainedModels"},
"jupyter":{"header":{"Authorization":"Token f3af05d5348301997fb014f245569e872d27bb9018fd70d2"}, "portNo":"8888",
"notebook_path":"aion/code/AWS_GPU_OD_Training.ipynb"}}
self.serverConfig = config["server"]
self.sshConfig = config["ssh"]
self.log = logging.getLogger('eion')
self.codeLocation = local_config["location"]["code"]
self.dataLocation = local_config["location"]["data"]
self.pretrainedModelLocation = local_config["location"]["pretrainedModel"]
self.jupyterConfig = local_config["jupyter"]
self.serverIP = ""
if self.serverConfig["awsAccessKeyId"] == "" or self.serverConfig["awsSecretAccessKey"] == "":
raise ValueError("Cloud server configuration is not available.")
if len(self.serverConfig["InstanceIds"]) == 0 and self.serverConfig["amiId"] == "":
raise ValueError("Please provide either InstanceIds or amiId in server config")
self.instanceId = []
self.separate_instance = False
if self.serverConfig["amiId"] != "":
self.separate_instance = True
else:
if len(self.serverConfig["InstanceIds"]):
if isinstance(self.serverConfig["InstanceIds"], list):
self.instanceId = self.serverConfig["InstanceIds"]
elif isinstance(self.serverConfig["InstanceIds"], str):
self.instanceId = [self.serverConfig["InstanceIds"]]
self.ec2_client = boto3.client(self.serverConfig["serverName"], region_name=self.serverConfig["regionName"], aws_access_key_id=self.serverConfig["awsAccessKeyId"], aws_secret_access_key=self.serverConfig["awsSecretAccessKey"])
def __sftp_exists(self, sftp, path):
try:
sftp.stat(path)
return True
except OSError: # e.g. errno.ENOENT: the remote path does not exist
return False
def __rmtree(self, sftp, remotepath, level=0):
for f in sftp.listdir_attr(remotepath):
rpath = str(PurePosixPath(remotepath)/f.filename)
if S_ISDIR(f.st_mode):
self.__rmtree(sftp, rpath, level=(level + 1))
sftp.rmdir(rpath)
else:
rpath = str(PurePosixPath(remotepath)/f.filename)
sftp.remove(rpath)
def copy_files_to_server(self, location):
try:
client = Connection(
host=self.serverIP,
user=self.sshConfig["userName"],
connect_kwargs={
"key_filename": self.sshConfig["keyFilePath"],
},
)
client.sudo('rm -rf {}/*'.format(self.dataLocation))
tarFile = str((PurePosixPath(self.dataLocation).parent/PurePosixPath(self.dataLocation).name).with_suffix(".tar.gz"))
client.put(location+'/test.tfrecord', self.dataLocation+'/test.tfrecord')
client.put(location+'/train.tfrecord', self.dataLocation+'/train.tfrecord')
client.put(location+'/pipeline.config', self.dataLocation+'/pipeline.config')
client.put(location+'/label_map.pbtxt', self.dataLocation+'/label_map.pbtxt')
client.put(location+'/model.config', self.dataLocation+'/model.config')
if self.jupyterConfig != "":
client.run("touch {}".format(self.dataLocation+'/log.txt'))
except Exception as e:
raise ValueError("Error in copying data to cloud server. " + str(e))
def __myexec(self, ssh, cmd, timeout, want_exitcode=False):
# one channel per command
stdin, stdout, stderr = ssh.exec_command(cmd)
# get the shared channel for stdout/stderr/stdin
channel = stdout.channel
# we do not need stdin.
stdin.close()
# indicate that we're not going to write to that channel anymore
channel.shutdown_write()
# read stdout/stderr in order to prevent read block hangs
stdout_chunks = []
stdout_chunks.append(stdout.channel.recv(len(stdout.channel.in_buffer)))
# chunked read to prevent stalls
while not channel.closed or channel.recv_ready() or channel.recv_stderr_ready():
# stop if channel was closed prematurely, and there is no data in the buffers.
got_chunk = False
readq, _, _ = select.select([stdout.channel], [], [], timeout)
for c in readq:
if c.recv_ready():
stdout_chunks.append(stdout.channel.recv(len(c.in_buffer)))
got_chunk = True
if c.recv_stderr_ready():
# make sure to read stderr to prevent stall
stderr.channel.recv_stderr(len(c.in_stderr_buffer))
got_chunk = True
'''
1) make sure that there are at least 2 cycles with no data in the input buffers in order to not exit too early (i.e. cat on a >200k file).
2) if no data arrived in the last loop, check if we already received the exit code
3) check if input buffers are empty
4) exit the loop
'''
if not got_chunk \\
and stdout.channel.exit_status_ready() \\
and not stderr.channel.recv_stderr_ready() \\
and not stdout.channel.recv_ready():
# indicate that we're not going to read from this channel anymore
stdout.channel.shutdown_read()
# close the channel
stdout.channel.close()
break # exit as remote side is finished and our buffers are empty
# close all the pseudofiles
stdout.close()
stderr.close()
if want_exitcode:
# exit code is always ready at this point
return (''.join(stdout_chunks), stdout.channel.recv_exit_status())
return ''.join(stdout_chunks)
def __myexec1(self, ssh, cmd, timeout, want_exitcode=False):
# one channel per command
stdin, stdout, stderr = ssh.exec_command(cmd, get_pty=True)
for line in iter(stderr.readline, ""):
print(line, end="")
stdin.close()
stdout.close()
stderr.close()
def executeCode(self):
try:
client = Connection(
host=self.serverIP,
user=self.sshConfig["userName"],
connect_kwargs={
"key_filename": self.sshConfig["keyFilePath"],
},
)
cmd = 'python3.8 {} {} {}'.format(self.codeLocation, self.dataLocation, self.pretrainedModelLocation)
client.run( cmd)
except Exception as e:
raise ValueError("Error in running code on cloud server. " + str(e))
def start_executing_notebook(self):
try:
publicIp_Port = self.serverIP + ":" + self.jupyterConfig["portNo"]
conURL = "ws://" + publicIp_Port
base = 'http://' + publicIp_Port + ''
headers = self.jupyterConfig["header"]
url = base + '/api/kernels'
flag = True
while flag: # potential deadlock: a timeout should be added here
response = requests.post(url, headers=headers)
flag = False
kernel = json.loads(response.text)
# Load the notebook and get the code of each cell
url = base + '/api/contents/' + self.jupyterConfig["notebook_path"]
response = requests.get(url, headers=headers)
file = json.loads(response.text)
code = [c['source'] for c in file['content']['cells'] if len(c['source']) > 0 and c['cell_type']=='code' ]
ws = create_connection(conURL + "/api/kernels/" + kernel["id"] + "/channels",
header=headers)
def send_execute_request(code):
msg_type = 'execute_request'
content = {'code': code, 'silent': False}
hdr = {'msg_id': uuid.uuid1().hex,
'username': 'test',
'session': uuid.uuid1().hex,
'date': datetime.datetime.now().isoformat(),
'msg_type': msg_type,
'version': '5.0'}
msg = {'header': hdr, 'parent_header': hdr,
'metadata': {},
'content': content}
return msg
for c in code:
ws.send(json.dumps(send_execute_request(c)))
# We ignore all the other messages, we just get the code execution output
# (this needs to be improved for production to take into account errors, large cell output, images, etc.)
error_msg = ''
traceback_msg = ''
for i in range(0, len(code)):
msg_type = ''
while msg_type != "stream":
rsp = json.loads(ws.recv())
msg_type = rsp["msg_type"]
if msg_type == 'error':
raise ValueError("Error on Cloud machine: "+rsp['content']['evalue'])
ws.close()
self.log.info('Status:- |... Execution Started')
except ClientError as e:
raise ValueError(e)
def __wait_for_completion(self, sftp, remoteLogFile, localLogFile):
waiting = True
error_msg = ""
while waiting:
time.sleep(5 * 60)
try:
sftp.get(str(remoteLogFile), str(localLogFile))
with open(localLogFile, "r") as f:
content = f.readlines()
for x in content:
if "Error" in x:
waiting = False
error_msg = x
if "success" in x:
waiting = False
except Exception as e:
raise ValueError("Error while reading the remote log file. " + str(e))
return error_msg
def copy_file_from_server(self, localPath):
try:
client = Connection(
host=self.serverIP,
user=self.sshConfig["userName"],
connect_kwargs={
"key_filename": self.sshConfig["keyFilePath"],
},
)
remoteLogFile = PurePosixPath(self.dataLocation)/'log.txt'
localLogFile = Path(localPath)/'remote_log.txt'
client.get(str(remoteLogFile), str(localLogFile))
tarFile = (PurePosixPath(self.dataLocation).parent/PurePosixPath(self.dataLocation).name).with_suffix(".tar.gz")
client.get(str(tarFile), str(Path(localPath)/tarFile.name))
except:
raise
return str(Path(localPath)/tarFile.name)
def create_instance(self):
instances = self.ec2_client.run_instances(
ImageId=self.serverConfig["amiId"],
MinCount=1,
MaxCount=1,
InstanceType="t2.xlarge",
KeyName="AION_GPU",
SecurityGroupIds = ["sg-02c3a6c8dd67edb74"]
)
self.instanceId = [instances['Instances'][0]['InstanceId']]
def start_instance(self):
if self.separate_instance:
self.create_instance()
try:
response = self.ec2_client.start_instances(InstanceIds=self.instanceId, DryRun=True)
except Exception as e:
if 'DryRunOperation' not in str(e):
raise ValueError("Error in starting the EC2 instance, check server configuration. " + str(e))
try:
running_state_code = 16
response = self.ec2_client.start_instances(InstanceIds=self.instanceId, DryRun=False)
instance_status_code = 0
while instance_status_code != running_state_code:
response = self.ec2_client.describe_instances(InstanceIds=self.instanceId)
instance_status_code = response['Reservations'][0]['Instances'][0]['State']['Code']
if instance_status_code == running_state_code:
self.serverIP = response['Reservations'][0]['Instances'][0]['PublicIpAddress']
break
except ClientError as e:
raise ValueError("Error in starting the EC2 instance. " + str(e))
def terminate_instance(self):
ec2 = boto3.resource(self.serverConfig["serverName"], region_name=self.serverConfig["regionName"], aws_access_key_id=self.serverConfig["awsAccessKeyId"], aws_secret_access_key=self.serverConfig["awsSecretAccessKey"])
ec2.instances.filter(InstanceIds=self.instanceId).terminate() # for terminating an ec2 instance
def stop_server_instance(self):
try:
self.ec2_client.stop_instances(InstanceIds=self.instanceId, DryRun=True)
except Exception as e:
if 'DryRunOperation' not in str(e):
raise
stopped_state_code = 80
# Dry run succeeded, call stop_instances without dryrun
try:
response = self.ec2_client.stop_instances(InstanceIds=self.instanceId, DryRun=False)
response = self.ec2_client.describe_instances(InstanceIds=self.instanceId)
instance_status_code = 0
while instance_status_code != stopped_state_code:
response = self.ec2_client.describe_instances(InstanceIds=self.instanceId)
instance_status_code = response['Reservations'][0]['Instances'][0]['State']['Code']
if instance_status_code == stopped_state_code:
break
except:
raise ValueError("Error in stopping the EC2 instance {}.Please stop it manually ".format(self.instanceId[0]))
if self.separate_instance:
try:
self.terminate_instance()
except:
raise ValueError("Error in terminating the EC2 instance {}.Please terminate it manually ".format(self.instanceId[0]))
<s> import json
from pathlib import Path
import shutil
class mergeLogs():
def __init__(self, folders, dataLocation=None):
self.folders = [Path(x) for x in folders]
self.dataLocation = dataLocation
self.baseFolder = ""
self.outputData = {}
def readOutputStr(self, data):
text = "-------> Output :"
index = data.find(text)
return data[index + len(text):] if index != -1 else None
def keywithmaxval(self, d):
""" a) create a list of the dict's keys and values;
b) return the key with the max value"""
v=list(d.values())
k=list(d.keys())
return k[v.index(max(v))]
def getBestScore(self, data):
text = "-------> Output :"
output = [x[len(text):-1] for x in data if text in x]
self.outputData = json.loads(output[0])
return self.outputData['data']['BestScore']
def getModelParams(self, data):
capture = False
startText = "---------- ClassifierModel has started ----------"
endText = "---------- ClassifierModel End ---------- "
modelBasedText = "Models Based Selected Features Start"
CorrelationBased = "Top/CorrelationBased Features Start"
removableText = "Status:- |... Search Optimization Method applied: random\\n"
modelsParam = []
modelcorrelation = None
output = {}
for x in data:
if endText in x:
capture = False
output[modelcorrelation] = ''.join(modelsParam)
modelcorrelation = None
modelsParam = []
elif capture:
if x != removableText:
modelsParam.append(x)
elif startText in x:
capture = True
elif modelBasedText in x:
modelcorrelation = 'modelBased'
elif CorrelationBased in x:
modelcorrelation = 'correlationBased'
return output
def mergeConfigFiles(self, bestScoreFolder):
# path is already updated
with open(bestScoreFolder/'etc'/'code_config.json', 'r') as f:
config = json.load(f)
if self.dataLocation:
config['dataLocation'] = self.dataLocation
if 'modelVersion' in config.keys():
config['modelVersion'] = '_'.join(config['modelVersion'].split('_')[:-1])
with open(bestScoreFolder/'etc'/'code_config.json', 'w') as f:
json.dump(config, f, indent=4)
with open(bestScoreFolder/'etc'/'display.json', 'r') as f:
config = json.load(f)
if 'version' in config.keys():
config['version'] = '_'.join(config['version'].split('_')[:-1])
with open(bestScoreFolder/'etc'/'display.json', 'w') as f:
json.dump(config, f, indent=4)
if len(self.folders) > 1:
with open(bestScoreFolder/'etc'/'output.json', 'r') as f:
config = json.load(f)
evaluated_models = config['data']['EvaluatedModels']
for folder in self.folders:
if folder != bestScoreFolder:
with open(folder/'etc'/'output.json', 'r') as f:
sub_config = json.load(f)
for evaluated_model in sub_config['data']['EvaluatedModels']:
evaluated_models.append(evaluated_model)
with open(bestScoreFolder/'etc'/'output.json', 'w') as f:
config['data']['EvaluatedModels'] = evaluated_models
json.dump(config, f, indent=4)
def mergeLogFiles(self, bestScoreFolder, data):
startText = "---------- ClassifierModel has started ----------\\n"
endText = "---------- ClassifierModel End ---------- \\n"
modelBasedText = "Models Based Selected Features Start"
CorrelationBased = "Top/CorrelationBased Features Start"
with open(bestScoreFolder/'log'/'model_training_logs.log', 'r') as f:
text = f.read()
CorrelationBasedIndex = text.find(CorrelationBased)
modelBasedTextIndex = text.find(modelBasedText)
firstendIndex = text.find(endText)
numOfMethods = 0
if CorrelationBasedIndex > 0:
numOfMethods += 1
if modelBasedTextIndex > 0:
numOfMethods += 1
if numOfMethods == 2:
secondendIndex = text[firstendIndex+ len(endText):].find(endText) +firstendIndex+len(endText)
# assuming correlation is always first
for k,v in data.items():
if k != bestScoreFolder:
if 'correlationBased' in v.keys():
text = text[:firstendIndex] + v['correlationBased'] + text[firstendIndex:]
firstendIndex += len(v['correlationBased'])
if numOfMethods == 2:
secondendIndex += len(v['correlationBased'])
if 'modelBased' in v.keys():
if numOfMethods == 2:
text = text[:secondendIndex] + v['modelBased'] + text[secondendIndex:]
secondendIndex += len(v['modelBased'])
else:
text = text[:firstendIndex] + v['modelBased'] + text[firstendIndex:]
firstendIndex += len(v['modelBased'])
with open(bestScoreFolder/'log'/'model_training_logs.log', 'w') as f:
text = text.replace(str(bestScoreFolder), str(self.baseFolder))
f.write(text)
def mergeFolder(self):
bestScoreInFile = {}
modelsTrainOutput = {}
self.baseFolder = self.folders[0].parent/"_".join(self.folders[0].name.split('_')[:-1])
if len(self.folders) == 1:
if self.baseFolder.exists():
shutil.rmtree(self.baseFolder)
<s> """The accuracy module provides tools for computing accuracy metrics on a set of predictions."""
from collections import defaultdict
import numpy as np
def mse(predictions, verbose=True):
"""Compute MSE (Mean Squared Error).
.. math::
\\\\text{MSE} = \\\\frac{1}{|\\\\hat{R}|} \\\\sum_{\\\\hat{r}_{ui} \\\\in
\\\\hat{R}}(r_{ui} - \\\\hat{r}_{ui})^2
Args:
predictions (:obj:`list` of :obj:`Prediction\\
<surprise.prediction_algorithms.predictions.Prediction>`):
A list of predictions, as returned by the :meth:`test()
<surprise.prediction_algorithms.algo_base.AlgoBase.test>` method.
verbose: If True, will print computed value. Default is ``True``.
Returns:
The Mean Squared Error of predictions.
Raises:
ValueError: When ``predictions`` is empty.
"""
if not predictions:
raise ValueError("Prediction list is empty.")
mse_ = np.mean(
[float((true_r - est) ** 2) for (_, _, true_r, est, _) in predictions]
)
if verbose:
print(f"MSE: {mse_:1.4f}")
return mse_
def mae(predictions, verbose=True):
"""Compute MAE (Mean Absolute Error).
.. math::
\\\\text{MAE} = \\\\frac{1}{|\\\\hat{R}|} \\\\sum_{\\\\hat{r}_{ui} \\\\in
\\\\hat{R}}|r_{ui} - \\\\hat{r}_{ui}|
Args:
predictions (:obj:`list` of :obj:`Prediction\\
<surprise.prediction_algorithms.predictions.Prediction>`):
A list of predictions, as returned by the :meth:`test()
<surprise.prediction_algorithms.algo_base.AlgoBase.test>` method.
verbose: If True, will print computed value. Default is ``True``.
Returns:
The Mean Absolute Error of predictions.
Raises:
ValueError: When ``predictions`` is empty.
"""
if not predictions:
raise ValueError("Prediction list is empty.")
mae_ = np.mean([float(abs(true_r - est)) for (_, _, true_r, est, _) in predictions])
if verbose:
print(f"MAE: {mae_:1.4f}")
return mae_
def fcp(predictions, verbose=True):
"""Compute FCP (Fraction of Concordant Pairs).
Computed as described in paper `Collaborative Filtering on Ordinal User
Feedback <https://www.ijcai.org/Proceedings/13/Papers/449.pdf>`_ by Koren
and Sill, section 5.2.
Args:
predictions (:obj:`list` of :obj:`Prediction\\
<surprise.prediction_algorithms.predictions.Prediction>`):
A list of predictions, as returned by the :meth:`test()
<surprise.prediction_algorithms.algo_base.AlgoBase.test>` method.
verbose: If True, will print computed value. Default is ``True``.
Returns:
The Fraction of Concordant Pairs.
Raises:
ValueError: When ``predictions`` is empty.
"""
if not predictions:
raise ValueError("Prediction list is empty.")
predictions_u = defaultdict(list)
nc_u = defaultdict(int)
nd_u = defaultdict(int)
for u0, _, r0, est, _ in predictions:
predictions_u[u0].append((r0, est))
for u0, preds in predictions_u.items():
for r0i, esti in preds:
for r0j, estj in preds:
if esti > estj and r0i > r0j:
nc_u[u0] += 1
if esti >= estj and r0i < r0j:
nd_u[u0] += 1
nc = np.mean(list(nc_u.values())) if nc_u else 0
nd = np.mean(list(nd_u.values())) if nd_u else 0
try:
fcp = nc / (nc + nd)
except ZeroDivisionError:
raise ValueError(
"cannot compute fcp on this list of prediction. "
+ "Does every user have at least two predictions?"
)
if verbose:
print(f"FCP: {fcp:1.4f}")
return fcp
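# A tiny usage sketch for the metrics above, using hand-built 5-tuples shaped
# like surprise's Prediction objects (uid, iid, true_r, est, details); the
# ratings themselves are made up.
if __name__ == '__main__':
    preds = [
        ('u1', 'i1', 4.0, 3.5, {}),
        ('u1', 'i2', 2.0, 2.5, {}),
        ('u2', 'i1', 5.0, 4.0, {}),
        ('u2', 'i2', 1.0, 2.0, {}),
    ]
    mse(preds)  # mean of squared errors over all predictions
    mae(preds)  # mean of absolute errors
    fcp(preds)  # computable here because every user has at least two predictions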
<s> """
The :mod:`dataset <surprise.dataset>` module defines the :class:`Dataset` class
and other subclasses which are used for managing datasets.
Users may use both *built-in* and user-defined datasets (see the
:ref:`getting_started` page for examples). Right now, three built-in datasets
are available:
* The `movielens-100k <https://grouplens.org/datasets/movielens/>`_ dataset.
* The `movielens-1m <https://grouplens.org/datasets/movielens/>`_ dataset.
* The `Jester <https://eigentaste.berkeley.edu/dataset/>`_ dataset 2.
Built-in datasets can all be loaded (or downloaded if you haven't already)
using the :meth:`Dataset.load_builtin` method.
Summary:
.. autosummary::
:nosignatures:
Dataset.load_builtin
Dataset.load_from_file
Dataset.load_from_folds
"""
import itertools
import os
import sys
from collections import defaultdict
from .builtin_datasets import BUILTIN_DATASETS, download_builtin_dataset
from .reader import Reader
from .trainset import Trainset
class Dataset:
"""Base class for loading datasets.
Note that you should never instantiate the :class:`Dataset` class directly
(same goes for its derived classes), but instead use one of the three
available methods for loading datasets."""
def __init__(self, reader):
self.reader = reader
@classmethod
def load_builtin(cls, name="ml-100k", prompt=True):
"""Load a built-in dataset.
If the dataset has not already been loaded, it will be downloaded and
saved. You will have to split your dataset using the :meth:`split
<DatasetAutoFolds.split>` method. See an example in the :ref:`User
Guide <cross_validate_example>`.
Args:
name(:obj:`string`): The name of the built-in dataset to load.
Accepted values are 'ml-100k', 'ml-1m', and 'jester'.
Default is 'ml-100k'.
prompt(:obj:`bool`): Prompt before downloading if dataset is not
already on disk.
Default is True.
Returns:
A :obj:`Dataset` object.
Raises:
ValueError: If the ``name`` parameter is incorrect.
"""
try:
dataset = BUILTIN_DATASETS[name]
except KeyError:
raise ValueError(
"unknown dataset "
+ name
+ ". Accepted values are "
+ ", ".join(BUILTIN_DATASETS.keys())
+ "."
)
# if dataset does not exist, offer to download it
if not os.path.isfile(dataset.path):
answered = not prompt
while not answered:
print(
"Dataset " + name + " could not be found. Do you want "
"to download it? [Y/n] ",
end="",
)
choice = input().lower()
if choice in ["yes", "y", "", "omg this is so nice of you!!"]:
answered = True
elif choice in ["no", "n", "hell no why would i want that?!"]:
answered = True
print("Ok then, I'm out!")
sys.exit()
download_builtin_dataset(name)
reader = Reader(**dataset.reader_params)
return cls.load_from_file(file_path=dataset.path, reader=reader)
@classmethod
def load_from_file(cls, file_path, reader):
"""Load a dataset from a (custom) file.
Use this if you want to use a custom dataset and all of the ratings are
stored in one file. You will have to split your dataset using the
:meth:`split <DatasetAutoFolds.split>` method. See an example in the
:ref:`User Guide <load_from_file_example>`.
Args:
file_path(:obj:`string`): The path to the file containing ratings.
reader(:obj:`Reader <surprise.reader.Reader>`): A reader to read
the file.
"""
return DatasetAutoFolds(ratings_file=file_path, reader=reader)
@classmethod
def load_from_folds(cls, folds_files, reader):
"""Load a dataset where folds (for cross-validation) are predefined by
some files.
The purpose of this method is to cover a common use case where a
dataset is already split into predefined folds, such as the
movielens-100k dataset which defines files u1.base, u1.test, u2.base,
u2.test, etc... It can also be used when you don't want to perform
cross-validation but still want to specify your training and testing
data (which comes down to 1-fold cross-validation anyway). See an
example in the :ref:`User Guide <load_from_folds_example>`.
Args:
folds_files(:obj:`iterable` of :obj:`tuples`): The list of the
folds. A fold is a tuple of the form ``(path_to_train_file,
path_to_test_file)``.
reader(:obj:`Reader <surprise.reader.Reader>`): A reader to read
the files.
"""
return DatasetUserFolds(folds_files=folds_files, reader=reader)
@classmethod
def load_from_df(cls, df, reader):
"""Load a dataset from a pandas dataframe.
Use this if you want to use a custom dataset that is stored in a pandas
dataframe. See the :ref:`User Guide<load_from_df_example>` for an
example.
Args:
df(`Dataframe`): The dataframe containing the ratings. It must have
three columns, corresponding to the user (raw) ids, the item
(raw) ids, and the ratings, in this order.
reader(:obj:`Reader <surprise.reader.Reader>`): A reader to read
the file. Only the ``rating_scale`` field needs to be
specified.
"""
return DatasetAutoFolds(reader=reader, df=df)
def read_ratings(self, file_name):
"""Return a list of ratings (user, item, rating, timestamp) read from
file_name"""
with open(os.path.expanduser(file_name)) as f:
raw_ratings = [
self.reader.parse_line(line)
for line in itertools.islice(f, self.reader.skip_lines, None)
]
return raw_ratings
def construct_trainset(self, raw_trainset):
raw2inner_id_users = {}
raw2inner_id_items = {}
current_u_index = 0
current_i_index = 0
ur = defaultdict(list)
ir = defaultdict(list)
# user raw id, item raw id, translated rating, time stamp
for urid, irid, r, timestamp in raw_trainset:
try:
uid = raw2inner_id_users[urid]
except KeyError:
uid = current_u_index
raw2inner_id_users[urid] = current_u_index
current_u_index += 1
try:
iid = raw2inner_id_items[irid]
except KeyError:
iid = current_i_index
raw2inner_id_items[irid] = current_i_index
current_i_index += 1
ur[uid].append((iid, r))
ir[iid].append((uid, r))
n_users = len(ur) # number of users
n_items = len(ir) # number of items
n_ratings = len(raw_trainset)
trainset = Trainset(
ur,
ir,
n_users,
n_items,
n_ratings,
self.reader.rating_scale,
raw2inner_id_users,
raw2inner_id_items,
)
return trainset
def construct_testset(self, raw_testset):
return [(ruid, riid, r_ui_trans) for (ruid, riid, r_ui_trans, _) in raw_testset]
class DatasetUserFolds(Dataset):
"""A derived class from :class:`Dataset` for which folds (for
cross-validation) are predefined."""
def __init__(self, folds_files=None, reader=None):
Dataset.__init__(self, reader)
self.folds_files = folds_files
# check that all files actually exist.
for train_test_files in self.folds_files:
for f in train_test_files:
if not os.path.isfile(os.path.expanduser(f)):
raise ValueError("File " + str(f) + " does not exist.")
class DatasetAutoFolds(Dataset):
"""A derived class from :class:`Dataset` for which folds (for
cross-validation) are not predefined. (Or for when there are no folds at
all)."""
def __init__(self, ratings_file=None, reader=None, df=None):
Dataset.__init__(self, reader)
self.has_been_split = False # flag indicating if split() was called.
if ratings_file is not None:
self.ratings_file = ratings_file
self.raw_ratings = self.read_ratings(self.ratings_file)
elif df is not None:
self.df = df
self.raw_ratings = [
(uid, iid, float(r), None)
for (uid, iid, r) in self.df.itertuples(index=False)
]
else:
raise ValueError("Must specify ratings file or dataframe.")
def build_full_trainset(self):
"""Do not split the dataset into folds and just return a trainset as
is, built from the whole dataset.
User can then query for predictions, as shown in the :ref:`User Guide
<train_on_whole_trainset>`.
Returns:
The :class:`Trainset <surprise.Trainset>`.
"""
return self.construct_trainset(self.raw_ratings)
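# A short usage sketch for load_from_df and build_full_trainset, with a
# made-up ratings dataframe; Reader is already imported at the top of this
# module, and the columns must be ordered user, item, rating.
if __name__ == '__main__':
    import pandas as pd
    ratings = pd.DataFrame({
        'userID': ['u1', 'u1', 'u2', 'u2'],
        'itemID': ['i1', 'i2', 'i1', 'i2'],
        'rating': [4.0, 2.0, 5.0, 1.0],
    })
    data = Dataset.load_from_df(ratings, Reader(rating_scale=(1, 5)))
    trainset = data.build_full_trainset()
    print(trainset.n_users, trainset.n_items, trainset.global_mean)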
<s> from pkg_resources import get_distribution
from . import dump, model_selection
from .builtin_datasets import get_dataset_dir
from .dataset import Dataset
from .prediction_algorithms import (
AlgoBase,
BaselineOnly,
CoClustering,
KNNBaseline,
KNNBasic,
KNNWithMeans,
KNNWithZScore,
NMF,
NormalPredictor,
Prediction,
PredictionImpossible,
SlopeOne,
SVD,
SVDpp,
)
from .reader import Reader
from .trainset import Trainset
__all__ = [
"AlgoBase",
"NormalPredictor",
"BaselineOnly",
"KNNBasic",
"KNNWithMeans",
"KNNBaseline",
"SVD",
"SVDpp",
"NMF",
"SlopeOne",
"CoClustering",
"PredictionImpossible",
" |
Prediction",
"Dataset",
"Reader",
"Trainset",
"dump",
"KNNWithZScore",
"get_dataset_dir",
"model_selection",
]
__version__ = get_distribution("scikit-surprise").version
<s> """This module contains the Reader class."""
from .builtin_datasets import BUILTIN_DATASETS
class Reader:
"""The Reader class is used to parse a file containing ratings.
Such a file is assumed to specify only one rating per line, and each line
needs to respect the following structure: ::
user ; item ; rating ; [timestamp]
where the order of the fields and the separator (here ';') may be
arbitrarily defined (see below). brackets indicate that the timestamp
field is optional.
For each built-in dataset, Surprise also provides predefined readers which
are useful if you want to use a custom dataset that has the same format as
a built-in one (see the ``name`` parameter).
Args:
name(:obj:`string`, optional): If specified, a Reader for one of the
built-in datasets is returned and any other parameter is ignored.
Accepted values are 'ml-100k', 'ml-1m', and 'jester'. Default
is ``None``.
line_format(:obj:`string`): The fields names, in the order at which
they are encountered on a line. Please note that ``line_format`` is
always space-separated (use the ``sep`` parameter). Default is
``'user item rating'``.
sep(char): the separator between fields. Example : ``';'``.
rating_scale(:obj:`tuple`, optional): The rating scale used for every
rating. Default is ``(1, 5)``.
skip_lines(:obj:`int`, optional): Number of lines to skip at the
beginning of the file. Default is ``0``.
"""
def __init__(
self,
name=None,
line_format="user item rating",
sep=None,
rating_scale=(1, 5),
skip_lines=0,
):
if name:
try:
self.__init__(**BUILTIN_DATASETS[name].reader_params)
except KeyError:
raise ValueError(
"unknown reader "
+ name
+ ". Accepted values are "
+ ", ".join(BUILTIN_DATASETS.keys())
+ "."
)
else:
self.sep = sep
self.skip_lines = skip_lines
self.rating_scale = rating_scale
lower_bound, higher_bound = rating_scale
splitted_format = line_format.split()
entities = ["user", "item", "rating"]
if "timestamp" in splitted_format:
self.with_timestamp = True
entities.append("timestamp")
else:
self.with_timestamp = False
# check that all fields are correct
if any(field not in entities for field in splitted_format):
raise ValueError("line_format parameter is incorrect.")
self.indexes = [splitted_format.index(entity) for entity in entities]
def parse_line(self, line):
"""Parse a line.
Ratings are translated so that they are all strictly positive.
Args:
line(str): The line to parse
Returns:
tuple: User id, item id, rating and timestamp. The timestamp is set
to ``None`` if it does not exist.
"""
line = line.split(self.sep)
try:
if self.with_timestamp:
uid, iid, r, timestamp = (line[i].strip() for i in self.indexes)
else:
uid, iid, r = (line[i].strip() for i in self.indexes)
timestamp = None
except IndexError:
raise ValueError(
"Impossible to parse line. Check the line_format" " and sep parameters."
)
return uid, iid, float(r), timestamp
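# A brief sketch of parsing one line with a custom Reader; the sample line
# mimics the ml-100k layout but is otherwise arbitrary.
if __name__ == '__main__':
    reader = Reader(line_format='user item rating timestamp', sep=';', rating_scale=(1, 5))
    uid, iid, r, ts = reader.parse_line('196;242;3;881250949')
    print(uid, iid, r, ts)  # -> 196 242 3.0 881250949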
<s> #!/usr/bin/env python
import argparse
import os
import random as rd
import shutil
import sys
import numpy as np
import surprise.dataset as dataset
from surprise import __version__
from surprise.builtin_datasets import get_dataset_dir
from surprise.dataset import Dataset
from surprise.model_selection import cross_validate, KFold, PredefinedKFold
from surprise.prediction_algorithms import (
BaselineOnly,
CoClustering,
KNNBaseline,
KNNBasic,
KNNWithMeans,
NMF,
NormalPredictor,
SlopeOne,
SVD,
SVDpp,
)
from surprise.reader import Reader # noqa
def main():
class MyParser(argparse.ArgumentParser):
"""A parser which prints the help message when an error occurs. Taken from
https://stackoverflow.com/questions/4042452/display-help-message-with-python-argparse-when-script-is-called-without-any-argu.""" # noqa
def error(self, message):
sys.stderr.write("error: %s\\n" % message)
self.print_help()
sys.exit(2)
parser = MyParser(
description="Evaluate the performance of a rating prediction "
+ "algorithm "
+ "on a given dataset using cross validation. You can use a built-in "
+ "or a custom dataset, and you can choose to automatically split the "
+ "dataset into folds, or manually specify train and test files. "
+ "Please refer to the documentation page "
+ "(https://surprise.readthedocs.io/) for more details.",
epilog="""Example:\\n
surprise -algo SVD -params "{'n_epochs': 5, 'verbose': True}"
-load-builtin ml-100k -n-folds 3""",
)
algo_choices = {
"NormalPredictor": NormalPredictor,
"BaselineOnly": BaselineOnly,
"KNNBasic": KNNBasic,
"KNNBaseline": KNNBaseline,
"KNNWithMeans": KNNWithMeans,
"SVD": SVD,
"SVDpp": SVDpp,
"NMF": NMF,
"SlopeOne": SlopeOne,
"CoClustering": CoClustering,
}
parser.add_argument(
"-algo",
type=str,
choices=algo_choices,
help="The prediction algorithm to use. "
+ "Allowed values are "
+ ", ".join(algo_choices.keys())
+ ".",
metavar="<prediction algorithm>",
)
parser.add_argument(
"-params",
type=str,
metavar="<algorithm parameters>",
default="{}",
help="A kwargs dictionary that contains all the "
+ "algorithm parameters."
+ "Example: \\"{'n_epochs': 10}\\".",
)
parser.add_argument(
"-load-builtin",
type=str,
dest="load_builtin",
metavar="<dataset name>",
default="ml-100k",
help="The name of the built-in dataset to use."
+ "Allowed values are "
+ ", ".join(dataset.BUILTIN_DATASETS.keys())
+ ". Default is ml-100k.",
)
parser.add_argument(
"-load-custom",
type=str,
dest="load_custom",
metavar="<file path>",
default=None,
help="A file path to custom dataset to use. "
+ "Ignored if "
+ "-loadbuiltin is set. The -reader parameter needs "
+ "to be set.",
)
parser.add_argument(
"-folds-files",
type=str,
dest="folds_files",
metavar="<train1 test1 train2 test2... >",
default=None,
help="A list of custom train and test files. "
+ "Ignored if -load-builtin or -load-custom is set. "
"The -reader parameter needs to be set.",
)
parser.add_argument(
"-reader",
type=str,
metavar="<reader>",
default=None,
help="A Reader to read the custom dataset. Example: "
+ "\\"Reader(line_format='user item rating timestamp',"
+ " sep='\\\\t')\\"",
)
parser.add_argument(
"-n-folds",
type=int,
dest="n_folds",
metavar="<number of folds>",
default=5,
help="The number of folds for cross-validation. " + "Default is 5.",
)
parser.add_argument(
"-seed",
type=int,
metavar="<random seed>",
default=None,
help="The seed to use for RNG. " + "Default is the current system time.",
)
parser.add_argument(
"--with-dump",
dest="with_dump",
action="store_true",
help="Dump the algorithm "
+ "results in a file (one file per fold). "
+ "Default is False.",
)
parser.add_argument(
"-dump-dir",
dest="dump_dir",
type=str,
metavar="<dir>",
default=None,
help="Where to dump the files. Ignored if "
+ "with-dump is not set. Default is "
+ os.path.join(get_dataset_dir(), "dumps/"),
)
parser.add_argument(
"--clean",
dest="clean",
action="store_true",
help="Remove the " + get_dataset_dir() + " directory and exit.",
)
parser.add_argument("-v", "--version", action="version", version=__version__)
args = parser.parse_args()
if args.clean:
folder = get_dataset_dir()
shutil.rmtree(folder)
print("Removed", folder)
exit()
# setup RNG
rd.seed(args.seed)
np.random.seed(args.seed)
# setup algorithm
params = eval(args.params)
if args.algo is None:
parser.error("No algorithm was specified.")
algo = algo_choices[args.algo](**params)
# setup dataset
if args.load_custom is not None: # load custom and split
if args.reader is None:
parser.error("-reader parameter is needed.")
reader = eval(args.reader)
data = Dataset.load_from_file(args.load_custom, reader=reader)
cv = KFold(n_splits=args.n_folds, random_state=args.seed)
elif args.folds_files is not None: # load from files
if args.reader is None:
parser.error("-reader parameter is needed.")
reader = eval(args.reader)
folds_files = args.folds_files.split()
folds_files = [
(folds_files[i], folds_files[i + 1])
for i in range(0, len(folds_files) - 1, 2)
]
data = Dataset.load_from_folds(folds_files=folds_files, reader=reader)
cv = PredefinedKFold()
else: # load builtin dataset and split
data = Dataset.load_builtin(args.load_builtin)
cv = KFold(n_splits=args.n_folds, random_state=args.seed)
cross_validate(algo, data, cv=cv, verbose=True)
if __name__ == "__main__":
main()
<s> """This module contains the Trainset class."""
import numpy as np
class Trainset:
"""A trainset contains all useful data that constitute a training set.
It is used by the :meth:`fit()
<surprise.prediction_algorithms.algo_base.AlgoBase.fit>` method of every
prediction algorithm. You should not try to build such an object on your
own but rather use the :meth:`Dataset.folds()
<surprise.dataset.Dataset.folds>` method or the
:meth:`DatasetAutoFolds.build_full_trainset()
<surprise.dataset.DatasetAutoFolds.build_full_trainset>` method.
Trainsets are different from :class:`Datasets <surprise.dataset.Dataset>`.
You can think of a :class:`Dataset <surprise.dataset.Dataset>` as the raw
data, and Trainsets as higher-level data where useful methods are defined.
Also, a :class:`Dataset <surprise.dataset.Dataset>` may be comprised of
multiple Trainsets (e.g. when doing cross validation).
Attributes:
ur(:obj:`defaultdict` of :obj:`list`): The users ratings. This is a
dictionary containing lists of tuples of the form ``(item_inner_id,
rating)``. The keys are user inner ids.
ir(:obj:`defaultdict` of :obj:`list`): The items ratings. This is a
dictionary containing lists of tuples of the form ``(user_inner_id,
rating)``. The keys are item inner ids.
n_users: Total number of users :math:`|U|`.
n_items: Total number of items :math:`|I|`.
n_ratings: Total number of ratings :math:`|R_{train}|`.
rating_scale(tuple): The minimum and maximal rating of the rating
scale.
global_mean: The mean of all ratings :math:`\\\\mu`.
"""
def __init__(
self,
ur,
ir,
n_users,
n_items,
n_ratings,
rating_scale,
raw2inner_id_users,
raw2inner_id_items,
):
self.ur = ur
self.ir = ir
self.n_users = n_users
self.n_items = n_items
self.n_ratings = n_ratings
self.rating_scale = rating_scale
self._raw2inner_id_users = raw2inner_id_users
self._raw2inner_id_items = raw2inner_id_items
self._global_mean = None
# inner2raw dicts could be built right now (or even before) but they
# are not always useful so we wait until we need them.
self._inner2raw_id_users = None
self._inner2raw_id_items = None
def knows_user(self, uid):
"""Indicate if the user is part of the trainset.
A user is part of the trainset if the user has at least one rating.
Args:
uid(int): The (inner) user id. See :ref:`this
note<raw_inner_note>`.
Returns:
``True`` if user is part of the trainset, else ``False``.
"""
return uid in self.ur
def knows_item(self, iid):
"""Indicate if the item is part of the trainset.
An item is part of the trainset if the item was rated at least once.
Args:
iid(int): The (inner) item id. See :ref:`this
note<raw_inner_note>`.
Returns:
``True`` if item is part of the trainset, else ``False``.
"""
return iid in self.ir
def to_inner_uid(self, ruid):
"""Convert a **user** raw id to an inner id.
|
See :ref:`this note<raw_inner_note>`.
Args:
ruid(str): The user raw id.
Returns:
int: The user inner id.
Raises:
ValueError: When user is not part of the trainset.
"""
try:
return self._raw2inner_id_users[ruid]
except KeyError:
raise ValueError("User " + str(ruid) + " is not part of the trainset.")
def to_raw_uid(self, iuid):
"""Convert a **user** inner id to a raw id.
See :ref:`this note<raw_inner_note>`.
Args:
iuid(int): The user inner id.
Returns:
str: The user raw id.
Raises:
ValueError: When ``iuid`` is not an inner id.
"""
if self._inner2raw_id_users is None:
self._inner2raw_id_users = {
inner: raw for (raw, inner) in self._raw2inner_id_users.items()
}
try:
return self._inner2raw_id_users[iuid]
except KeyError:
raise ValueError(str(iuid) + " is not a valid inner id.")
def to_inner_iid(self, riid):
"""Convert an **item** raw id to an inner id.
See :ref:`this note<raw_inner_note>`.
Args:
riid(str): The item raw id.
Returns:
int: The item inner id.
Raises:
ValueError: When item is not part of the trainset.
"""
try:
return self._raw2inner_id_items[riid]
except KeyError:
raise ValueError("Item " + str(riid) + " is not part of the trainset.")
def to_raw_iid(self, iiid):
"""Convert an **item** inner id to a raw id.
See :ref:`this note<raw_inner_note>`.
Args:
iiid(int): The item inner id.
Returns:
str: The item raw id.
Raises:
ValueError: When ``iiid`` is not an inner id.
"""
if self._inner2raw_id_items is None:
self._inner2raw_id_items = {
inner: raw for (raw, inner) in self._raw2inner_id_items.items()
}
try:
return self._inner2raw_id_items[iiid]
except KeyError:
raise ValueError(str(iiid) + " is not a valid inner id.")
def all_ratings(self):
"""Generator function to iterate over all ratings.
Yields:
A tuple ``(uid, iid, rating)`` where ids are inner ids (see
:ref:`this note <raw_inner_note>`).
"""
for u, u_ratings in self.ur.items():
for i, r in u_ratings:
yield u, i, r
def build_testset(self):
"""Return a list of ratings that can be used as a testset in the
:meth:`test() <surprise.prediction_algorithms.algo_base.AlgoBase.test>`
method.
The ratings are all the ratings that are in the trainset, i.e. all the
ratings returned by the :meth:`all_ratings()
<surprise.Trainset.all_ratings>` generator. This is useful in
        cases where you want to test your algorithm on the trainset.
"""
return [
(self.to_raw_uid(u), self.to_raw_iid(i), r)
for (u, i, r) in self.all_ratings()
]
def build_anti_testset(self, fill=None):
"""Return a list of ratings that can be used as a testset in the
:meth:`test() <surprise.prediction_algorithms.algo_base.AlgoBase.test>`
method.
The ratings are all the ratings that are **not** in the trainset, i.e.
all the ratings :math:`r_{ui}` where the user :math:`u` is known, the
item :math:`i` is known, but the rating :math:`r_{ui}` is not in the
trainset. As :math:`r_{ui}` is unknown, it is either replaced by the
:code:`fill` value or assumed to be equal to the mean of all ratings
:meth:`global_mean <surprise.Trainset.global_mean>`.
Args:
fill(float): The value to fill unknown ratings. If :code:`None` the
global mean of all ratings :meth:`global_mean
<surprise.Trainset.global_mean>` will be used.
Returns:
A list of tuples ``(uid, iid, fill)`` where ids are raw ids.
"""
fill = self.global_mean if fill is None else float(fill)
anti_testset = []
for u in self.all_users():
user_items = {j for (j, _) in self.ur[u]}
anti_testset += [
(self.to_raw_uid(u), self.to_raw_iid(i), fill)
for i in self.all_items()
if i not in user_items
]
return anti_testset
def all_users(self):
"""Generator function to iterate over all users.
Yields:
Inner id of users.
"""
return range(self.n_users)
def all_items(self):
"""Generator function to iterate over all items.
Yields:
Inner id of items.
"""
return range(self.n_items)
@property
def global_mean(self):
if self._global_mean is None:
self._global_mean = np.mean([r for (_, _, r) in self.all_ratings()])
return self._global_mean
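# Minimal usage sketch (assumes the standard surprise entry points; the
# "ml-100k" builtin and its raw string ids are illustrative):
#
#   from surprise import Dataset
#   data = Dataset.load_builtin("ml-100k")
#   trainset = data.build_full_trainset()
#   inner_uid = trainset.to_inner_uid("196")   # raw -> inner user id
#   assert trainset.to_raw_uid(inner_uid) == "196"
#   unknown = trainset.build_anti_testset()    # (uid, iid, global_mean) triples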
<s> """The utils module contains the get_rng function."""
import numbers
import numpy as np
def get_rng(random_state):
"""Return a 'validated' RNG.
    If random_state is None, use the RandomState singleton from numpy. If it
    is an integer, treat it as a seed and initialize an RNG with that seed.
    If it is already an RNG, return it as-is.
"""
if random_state is None:
return np.random.mtrand._rand
elif isinstance(random_state, (numbers.Integral, np.integer)):
return np.random.RandomState(random_state)
if isinstance(random_state, np.random.RandomState):
return random_state
raise ValueError(
"Wrong random state. Expecting None, an int or a numpy "
"RandomState instance, got a "
"{}".format(type(random_state))
)
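# Minimal sketch of the three accepted call forms (mirrors the checks above):
#
#   rng = get_rng(42)                        # int seed -> reproducible RandomState
#   rng = get_rng(np.random.RandomState(0))  # an existing RNG is passed through
#   rng = get_rng(None)                      # numpy's global RandomState singleton
#   rng.shuffle(np.arange(10))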
<s> """
the :mod:`knns` module includes some k-NN inspired algorithms.
"""
import heapq
import numpy as np
from .algo_base import AlgoBase
from .predictions import PredictionImpossible
# Important note: as soon as an algorithm uses a similarity measure, it should
# also allow the bsl_options parameter because of the pearson_baseline
# similarity. It can be done explicitly (e.g. KNNBaseline), or implicitly
# using kwargs (e.g. KNNBasic).
class SymmetricAlgo(AlgoBase):
"""This is an abstract class aimed to ease the use of symmetric algorithms.
    A symmetric algorithm is an algorithm that can be based on users or on
    items indifferently, e.g. all the algorithms in this module.
    When the algorithm is user-based, x denotes a user and y an item;
    otherwise it is reversed.
"""
def __init__(self, sim_options={}, verbose=True, **kwargs):
AlgoBase.__init__(self, sim_options=sim_options, **kwargs)
self.verbose = verbose
def fit(self, trainset):
AlgoBase.fit(self, trainset)
ub = self.sim_options["user_based"]
self.n_x = self.trainset.n_users if ub else self.trainset.n_items
self.n_y = self.trainset.n_items if ub else self.trainset.n_users
self.xr = self.trainset.ur if ub else self.trainset.ir
self.yr = self.trainset.ir if ub else self.trainset.ur
return self
def switch(self, u_stuff, i_stuff):
"""Return x_stuff and y_stuff depending on the user_based field."""
if self.sim_options["user_based"]:
return u_stuff, i_stuff
else:
return i_stuff, u_stuff
class KNNBasic(SymmetricAlgo):
"""A basic collaborative filtering algorithm.
The prediction :math:`\\\\hat{r}_{ui}` is set as:
.. math::
\\\\hat{r}_{ui} = \\\\frac{
\\\\sum\\\\limits_{v \\\\in N^k_i(u)} \\\\text{sim}(u, v) \\\\cdot r_{vi}}
{\\\\sum\\\\limits_{v \\\\in N^k_i(u)} \\\\text{sim}(u, v)}
or
.. math::
\\\\hat{r}_{ui} = \\\\frac{
\\\\sum\\\\limits_{j \\\\in N^k_u(i)} \\\\text{sim}(i, j) \\\\cdot r_{uj}}
{\\\\sum\\\\limits_{j \\\\in N^k_u(i)} \\\\text{sim}(i, j)}
depending on the ``user_based`` field of the ``sim_options`` parameter.
Args:
k(int): The (max) number of neighbors to take into account for
aggregation (see :ref:`this note <actual_k_note>`). Default is
``40``.
min_k(int): The minimum number of neighbors to take into account for
aggregation. If there are not enough neighbors, the prediction is
set to the global mean of all ratings. Default is ``1``.
sim_options(dict): A dictionary of options for the similarity
measure. See :ref:`similarity_measures_configuration` for accepted
options.
verbose(bool): Whether to print trace messages of bias estimation,
similarity, etc. Default is True.
"""
def __init__(self, k=40, min_k=1, sim_options={}, verbose=True, **kwargs):
SymmetricAlgo.__init__(self, sim_options=sim_options, verbose=verbose, **kwargs)
self.k = k
self.min_k = min_k
def fit(self, trainset):
SymmetricAlgo.fit(self, trainset)
self.sim = self.compute_similarities()
return self
def estimate(self, u, i):
if not (self.trainset.knows_user(u) and self.trainset.knows_item(i)):
raise PredictionImpossible("User and/or item is unknown.")
x, y = self.switch(u, i)
neighbors = [(self.sim[x, x2], r) for (x2, r) in self.yr[y]]
k_neighbors = heapq.nlargest(self.k, neighbors, key=lambda t: t[0])
# compute weighted average
sum_sim = sum_ratings = actual_k = 0
for (sim, r) in k_neighbors:
if sim > 0:
sum_sim += sim
sum_ratings += sim * r
actual_k += 1
if actual_k < self.min_k:
raise PredictionImpossible("Not enough neighbors.")
est = sum_ratings / sum_sim
details = {"actual_k": actual_k}
return est, details
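# Hedged usage sketch for KNNBasic, assuming the standard surprise helpers
# (Dataset, model_selection.train_test_split) and the "ml-100k" builtin:
#
#   from surprise import Dataset, KNNBasic
#   from surprise.model_selection import train_test_split
#   data = Dataset.load_builtin("ml-100k")
#   trainset, testset = train_test_split(data, test_size=0.25)
#   algo = KNNBasic(k=40, min_k=1, sim_options={"name": "msd", "user_based": True})
#   algo.fit(trainset)
#   pred = algo.predict(uid="196", iid="302", r_ui=4.0)
#   print(pred.est, pred.details.get("actual_k"))  # actual_k set when prediction is possible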
class KNNWithMeans(SymmetricAlgo):
"""A basic collaborative filtering algorithm, taking into account the mean
ratings of each user.
The prediction :math:`\\\\hat{r}_{ui}` is set as:
.. math::
\\\\hat{r}_{ui} = \\\\mu_u + \\\\frac{ \\\\sum\\\\limits_{v \\\\in N^k_i(u)}
\\\\text{sim}(u, v) \\\\cdot (r_{vi} - \\\\mu_v)} {\\\\sum\\\\limits_{v \\\\in
N^k_i(u)} \\\\text{sim}(u, v)}
or
.. math::
\\\\hat{r}_{ui} = \\\\mu_i + \\\\frac{ \\\\sum\\\\limits_{j \\\\in N^k_u(i)}
\\\\text{sim}(i, j) \\\\cdot (r_{uj} - \\\\mu_j)} {\\\\sum\\\\limits_{j \\\\in
N^k_u(i)} \\\\text{sim}(i, j)}
depending on the ``user_based`` field of the ``sim_options`` parameter.
Args:
k(int): The (max) number of neighbors to take into account for
aggregation (see :ref:`this note <actual_k_note>`). Default is
``40``.
min_k(int): The minimum number of neighbors to take into account for
aggregation. If there are not enough neighbors, the neighbor
aggregation is set to zero (so the prediction ends up being
equivalent to the mean :math:`\\\\mu_u` or :math:`\\\\mu_i`). Default is
``1``.
sim_options(dict): A dictionary of options for the similarity
measure. See :ref:`similarity_measures_configuration` for accepted
options.
verbose(bool): Whether to print trace messages of bias estimation,
similarity, etc. Default is True.
"""
def __init__(self, k=40, min_k=1, sim_options={}, verbose=True, **kwargs):
SymmetricAlgo.__init__(self, sim_options=sim_options, verbose=verbose, **kwargs)
self.k = k
self.min_k = min_k
def fit(self, trainset):
SymmetricAlgo.fit(self, trainset)
self.sim = self.compute_similarities()
self.means = np.zeros(self.n_x)
for x, ratings in self.xr.items():
self.means[x] = np.mean([r for (_, r) in ratings])
return self
def estimate(self, u, i):
if not (self.trainset.knows_user(u) and self.trainset.knows_item(i)):
raise PredictionImpossible("User and/or item is unknown.")
x, y = self.switch(u, i)
neighbors = [(x2, self.sim[x, x2], r) for (x2, r) in self.yr[y]]
k_neighbors = heapq.nlargest(self.k, neighbors, key=lambda t: t[1])
est = self.means[x]
# compute weighted average
sum_sim = sum_ratings = actual_k = 0
for (nb, sim, r) in k_neighbors:
if sim > 0:
sum_sim += sim
sum_ratings += sim * (r - self.means[nb])
actual_k += 1
if actual_k < self.min_k:
sum_ratings = 0
try:
est += sum_ratings / sum_sim
except ZeroDivisionError:
pass # return mean
details = {"actual_k": actual_k}
return est, details
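# Sketch: switching KNNWithMeans to item-based mode only changes sim_options;
# the SymmetricAlgo.switch() machinery above handles the rest. `trainset` is
# assumed to come from the KNNBasic sketch above:
#
#   algo = KNNWithMeans(k=20, sim_options={"name": "pearson", "user_based": False})
#   algo.fit(trainset)   # item-item similarities, predictions centered on item means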
class KNNBaseline(SymmetricAlgo):
"""A basic collaborative filtering algorithm taking into account a
    *baseline* rating.
The prediction :math:`\\\\hat{r}_{ui}` is set as:
.. math::
\\\\hat{r}_{ui} = b_{ui} + \\\\frac{ \\\\sum\\\\limits_{v \\\\in N^k_i(u)}
\\\\text{sim}(u, v) \\\\cdot (r_{vi} - b_{vi})} {\\\\sum\\\\limits_{v \\\\in
N^k_i(u)} \\\\text{sim}(u, v)}
or
.. math::
\\\\hat{r}_{ui} = b_{ui} + \\\\frac{ \\\\sum\\\\limits_{j \\\\in N^k_u(i)}
\\\\text{sim}(i, j) \\\\cdot (r_{uj} - b_{uj})} {\\\\sum\\\\limits_{j \\\\in
N^k_u(i)} \\\\text{sim}(i, j)}
depending on the ``user_based`` field of the ``sim_options`` parameter. For
the best predictions, use the :func:`pearson_baseline
<surprise.similarities.pearson_baseline>` similarity measure.
This algorithm corresponds to formula (3), section 2.2 of
:cite:`Koren:2010`.
Args:
k(int): The (max) number of neighbors to take into account for
aggregation (see :ref:`this note <actual_k_note>`). Default is
``40``.
min_k(int): The minimum number of neighbors to take into account for
aggregation. If there are not enough neighbors, the neighbor
aggregation is set to zero (so the prediction ends up being
equivalent to the baseline). Default is ``1``.
sim_options(dict): A dictionary of options for the similarity
measure. See :ref:`similarity_measures_configuration` for accepted
options. It is recommended to use the :func:`pearson_baseline
<surprise.similarities.pearson_baseline>` similarity measure.
bsl_options(dict): A dictionary of options for the baseline estimates
computation. See :ref:`baseline_estimates_configuration` for
accepted options.
verbose(bool): Whether to print trace messages of bias estimation,
similarity, etc. Default is True.
"""
def __init__(
self, k=40, min_k=1, sim_options={}, bsl_options={}, verbose=True, **kwargs
):
SymmetricAlgo.__init__(
self,
sim_options=sim_options,
bsl_options=bsl_options,
verbose=verbose,
**kwargs
)
self.k = k
self.min_k = min_k
def fit(self, trainset):
SymmetricAlgo.fit(self, trainset)
self.bu, self.bi = self.compute_baselines()
self.bx, self.by = self.switch(self.bu, self.bi)
self.sim = self.compute_similarities()
return self
def estimate(self, u, i):
est = self.trainset.global_mean
if self.trainset.knows_user(u):
est += self.bu[u]
if self.trainset.knows_item(i):
est += self.bi[i]
x, y = self.switch(u, i)
if not (self.trainset.knows_user(u) and self.trainset.knows_item(i)):
return est
neighbors = [(x2, self.sim[x, x2], r) for (x2, r) in self.yr[y]]
k_neighbors = heapq.nlargest(self.k, neighbors, key=lambda t: t[1])
# compute weighted average
sum_sim = sum_ratings = actual_k = 0
for (nb, sim, r) in k_neighbors:
if sim > 0:
sum_sim += sim
nb_bsl = self.trainset.global_mean + self.bx[nb] + self.by[y]
sum_ratings += sim * (r - nb_bsl)
actual_k += 1
if actual_k < self.min_k:
sum_ratings = 0
try:
est += sum_ratings / sum_sim
except ZeroDivisionError:
pass # just baseline again
details = {"actual_k": actual_k}
return est, details
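# Sketch: KNNBaseline is documented above as working best with the
# pearson_baseline similarity; bsl_options selects how b_u and b_i are fit
# (see compute_baselines in AlgoBase). Values shown are illustrative:
#
#   sim_options = {"name": "pearson_baseline", "shrinkage": 100}
#   bsl_options = {"method": "als", "n_epochs": 10}
#   algo = KNNBaseline(k=40, sim_options=sim_options, bsl_options=bsl_options)
#   algo.fit(trainset)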
class KNNWithZScore(SymmetricAlgo):
"""A basic collaborative filtering algorithm, taking into account
the z-score normalization of each user.
The prediction :math:`\\\\hat{r}_{ui}` is set as:
.. math::
\\\\hat{r}_{ui} = \\\\mu_u + \\\\sigma_u \\\\frac{ \\\\sum\\\\limits_{v \\\\in N^k_i(u)}
\\\\text{sim}(u, v) \\\\cdot (r_{vi} - \\\\mu_v) / \\\\sigma_v} {\\\\sum\\\\limits_{v
\\\\in N^k_i(u)} \\\\text{sim}(u, v)}
or
.. math::
\\\\hat{r}_{ui} = \\\\mu_i + \\\\sigma_i \\\\frac{ \\\\sum\\\\limits_{j \\\\in N^k_u(i)}
\\\\text{sim}(i, j) \\\\cdot (r_{uj} - \\\\mu_j) / \\\\sigma_j} {\\\\sum\\\\limits_{j
\\\\in N^k_u(i)} \\\\text{sim}(i, j)}
depending on the ``user_based`` field of the ``sim_options`` parameter.
    If :math:`\\\\sigma` is 0, the overall sigma is used instead.
Args:
k(int): The (max) number of neighbors to take into account for
aggregation (see :ref:`this note <actual_k_note>`). Default is
``40``.
min_k(int): The minimum number of neighbors to take into account for
aggregation. If there are not enough neighbors, the neighbor
aggregation is set to zero (so the prediction ends up being
equivalent to the mean :math:`\\\\mu_u` or :math:`\\\\mu_i`). Default is
``1``.
sim_options(dict): A dictionary of options for the similarity
measure. See :ref:`similarity_measures_configuration` for accepted
options.
verbose(bool): Whether to print trace messages of bias estimation,
similarity, etc. Default is True.
"""
def __init__(self, k=40, min_k=1, sim_options={}, verbose=True, **kwargs):
SymmetricAlgo.__init__(self, sim_options=sim_options, verbose=verbose, **kwargs)
self.k = k
self.min_k = min_k
def fit(self, trainset):
SymmetricAlgo.fit(self, trainset)
self.means = np.zeros(self.n_x)
self.sigmas = np.zeros(self.n_x)
# when certain sigma is 0, use overall sigma
self.overall_sigma = np.std([r for (_, _, r) in self.trainset.all_ratings()])
for x, ratings in self.xr.items():
self.means[x] = np.mean([r for (_, r) in ratings])
sigma = np.std([r for (_, r) in ratings])
self.sigmas[x] = self.overall_sigma if sigma == 0.0 else sigma
self.sim = self.compute_similarities()
return self
def estimate(self, u, i):
if not (self.trainset.knows_user(u) and self.trainset.knows_item(i)):
raise PredictionImpossible("User and/or item is unknown.")
x, y = self.switch(u, i)
neighbors = [(x2, self.sim[x, x2], r) for (x2, r) in self.yr[y]]
k_neighbors = heapq.nlargest(self.k, neighbors, key=lambda t: t[1])
est = self.means[x]
# compute weighted average
sum_sim = sum_ratings = actual_k = 0
for (nb, sim, r) in k_neighbors:
if sim > 0:
sum_sim += sim
sum_ratings += sim * (r - self.means[nb]) / self.sigmas[nb]
actual_k += 1
if actual_k < self.min_k:
sum_ratings = 0
try:
est += sum_ratings / sum_sim * self.sigmas[x]
except ZeroDivisionError:
pass # return mean
details = {"actual_k": actual_k}
return est, details
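# Sketch of the sigma fallback implemented in fit() above: a user (or item)
# whose ratings are all identical has a standard deviation of 0, so the
# overall sigma is substituted to keep (r - mean) / sigma well defined:
#
#   sigma = np.std([4.0, 4.0, 4.0])                   # 0.0
#   sigma = overall_sigma if sigma == 0.0 else sigma  # fall back to global spread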
<s> """
This class implements the baseline estimation.
"""
from .algo_base import AlgoBase
class BaselineOnly(AlgoBase):
r"""Algorithm predicting the baseline estimate for given user and item.
:math:`\\hat{r}_{ui} = b_{ui} = \\mu + b_u + b_i`
If user :math:`u` is unknown, then the bias :math:`b_u` is assumed to be
zero. The same applies for item :math:`i` with :math:`b_i`.
See section 2.1 of :cite:`Koren:2010` for details.
Args:
bsl_options(dict): A dictionary of options for the baseline estimates
computation. See :ref:`baseline_estimates_configuration` for
accepted options.
verbose(bool): Whether to print trace messages of bias estimation,
similarity, etc. Default is True.
"""
def __init__(self, bsl_options={}, verbose=True):
AlgoBase.__init__(self, bsl_options=bsl_options)
self.verbose = verbose
def fit(self, trainset):
AlgoBase.fit(self, trainset)
self.bu, self.bi = self.compute_baselines()
return self
def estimate(self, u, i):
est = self.trainset.global_mean
if self.trainset.knows_user(u):
est += self.bu[u]
if self.trainset.knows_item(i):
est += self.bi[i]
return est
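# Hedged usage sketch: the biases can be fit with either the ALS or the SGD
# procedure from optimize_baselines; option values here are illustrative:
#
#   algo = BaselineOnly(bsl_options={"method": "sgd", "learning_rate": 0.005})
#   algo.fit(trainset)
#   print(algo.predict("196", "302").est)   # mu + b_u + b_i, clipped to the scale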
<s> """
The :mod:`prediction_algorithms` package includes the prediction algorithms
available for recommendation.
The available prediction algorithms are:
.. autosummary::
:nosignatures:
random_pred.NormalPredictor
baseline_only.BaselineOnly
knns.KNNBasic
knns.KNNWithMeans
knns.KNNWithZScore
knns.KNNBaseline
matrix_factorization.SVD
matrix_factorization.SVDpp
matrix_factorization.NMF
slope_one.SlopeOne
co_clustering.CoClustering
"""
from .algo_base import AlgoBase
from .baseline_only import BaselineOnly
from .co_clustering import CoClustering
from .knns import KNNBaseline, KNNBasic, KNNWithMeans, KNNWithZScore
from .matrix_factorization import NMF, SVD, SVDpp
from .predictions import Prediction, PredictionImpossible
from .random_pred import NormalPredictor
from .slope_one import SlopeOne
__all__ = [
"AlgoBase",
"NormalPredictor",
"BaselineOnly",
"KNNBasic",
"KNNBaseline",
"KNNWithMeans",
"SVD",
"SVDpp",
"NMF",
"SlopeOne",
"CoClustering",
"PredictionImpossible",
"Prediction",
"KNNWithZScore",
]
<s> """ Algorithm predicting a random rating.
"""
import numpy as np
from .algo_base import AlgoBase
class NormalPredictor(AlgoBase):
"""Algorithm predicting a random rating based on the distribution of the
training set, which is assumed to be normal.
The prediction :math:`\\\\hat{r}_{ui}` is generated from a normal distribution
:math:`\\\\mathcal{N}(\\\\hat{\\\\mu}, \\\\hat{\\\\sigma}^2)` where :math:`\\\\hat{\\\\mu}` and
:math:`\\\\hat{\\\\sigma}` are estimated from the training data using Maximum
Likelihood Estimation:
.. math::
\\\\hat{\\\\mu} &= \\\\frac{1}{|R_{train}|} \\\\sum_{r_{ui} \\\\in R_{train}}
        r_{ui}\\\\\\\\
\\\\hat{\\\\sigma} &= \\\\sqrt{\\\\sum_{r_{ui} \\\\in R_{train}}
\\\\frac{(r_{ui} - \\\\hat{\\\\mu})^2}{|R_{train}|}}
"""
def __init__(self):
AlgoBase.__init__(self)
def fit(self, trainset):
AlgoBase.fit(self, trainset)
num = sum(
(r - self.trainset.global_mean) ** 2
for (_, _, r) in self.trainset.all_ratings()
)
denum = self.trainset.n_ratings
self.sigma = np.sqrt(num / denum)
return self
def estimate(self, *_):
return np.random.normal(self.trainset.global_mean, self.sigma)
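# Sketch: estimate() ignores its arguments, so repeated calls for the same
# (user, item) pair generally differ -- this predictor is a random baseline:
#
#   algo = NormalPredictor().fit(trainset)
#   algo.predict("196", "302").est   # a fresh draw from N(global_mean, sigma^2)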
<s> """
The :mod:`surprise.prediction_algorithms.algo_base` module defines the base
class :class:`AlgoBase` from which every single prediction algorithm has to
inherit.
"""
import heapq
from .. import similarities as sims
from .optimize_baselines import baseline_als, baseline_sgd
from .predictions import Prediction, PredictionImpossible
class AlgoBase:
"""Abstract class where is defined the basic behavior of a prediction
algorithm.
Keyword Args:
baseline_options(dict, optional): If the algorithm needs to compute a
baseline estimate, the ``baseline_options`` parameter is used to
configure how they are computed. See
:ref:`baseline_estimates_configuration` for usage.
"""
def __init__(self, **kwargs):
self.bsl_options = kwargs.get("bsl_options", {})
self.sim_options = kwargs.get("sim_options", {})
if "user_based" not in self.sim_options:
self.sim_options["user_based"] = True
def fit(self, trainset):
"""Train an algorithm on a given training set.
This method is called by every derived class as the first basic step
for training an algorithm. It basically just initializes some internal
        structures and sets the ``self.trainset`` attribute.
Args:
trainset(:obj:`Trainset <surprise.Trainset>`) : A training
set, as returned by the :meth:`folds
<surprise.dataset.Dataset.folds>` method.
Returns:
self
"""
self.trainset = trainset
# (re) Initialise baselines
self.bu = self.bi = None
return self
def predict(self, uid, iid, r_ui=None, clip=True, verbose=False):
"""Compute the rating prediction for given user and item.
The ``predict`` method converts raw ids to inner ids and then calls the
``estimate`` method which is defined in every derived class. If the
prediction is impossible (e.g. because the user and/or the item is
unknown), the prediction is set according to
:meth:`default_prediction()
<surprise.prediction_algorithms.algo_base.AlgoBase.default_prediction>`.
Args:
            uid: (Raw) id of the user. See :ref:`this note<raw_inner_note>`.
iid: (Raw) id of the item. See :ref:`this note<raw_inner_note>`.
r_ui(float): The true rating :math:`r_{ui}`. Optional, default is
``None``.
clip(bool): Whether to clip the estimation into the rating scale.
For example, if :math:`\\\\hat{r}_{ui}` is :math:`5.5` while the
rating scale is :math:`[1, 5]`, then :math:`\\\\hat{r}_{ui}` is
set to :math:`5`. Same goes if :math:`\\\\hat{r}_{ui} < 1`.
Default is ``True``.
verbose(bool): Whether to print details of the prediction. Default
is False.
Returns:
A :obj:`Prediction\\
<surprise.prediction_algorithms.predictions.Prediction>` object
containing:
- The (raw) user id ``uid``.
- The (raw) item id ``iid``.
- The true rating ``r_ui`` (:math:`r_{ui}`).
- The estimated rating (:math:`\\\\hat{r}_{ui}`).
- Some additional details about the prediction that might be useful
for later analysis.
"""
# Convert raw ids to inner ids
try:
iuid = self.trainset.to_inner_uid(uid)
except ValueError:
iuid = "UKN__" + str(uid)
try:
iiid = self.trainset.to_inner_iid(iid)
except ValueError:
iiid = "UKN__" + str(iid)
details = {}
try:
est = self.estimate(iuid, iiid)
# If the details dict was also returned
if isinstance(est, tuple):
est, details = est
details["was_impossible"] = False
except PredictionImpossible as e:
est = self.default_prediction()
details["was_impossible"] = True
details["reason"] = str(e)
# clip estimate into [lower_bound, higher_bound]
if clip:
lower_bound, higher_bound = self.trainset.rating_scale
est = min(higher_bound, est)
est = max(lower_bound, est)
pred = Prediction(uid, iid, r_ui, est, details)
if verbose:
print(pred)
return pred
def default_prediction(self):
"""Used when the ``PredictionImpossible`` exception is raised during a
call to :meth:`predict()
<surprise.prediction_algorithms.algo_base.AlgoBase.predict>`. By
default, return the global mean of all ratings (can be overridden in
child classes).
Returns:
(float): The mean of all ratings in the trainset.
"""
return self.trainset.global_mean
def test(self, testset, verbose=False):
"""Test the algorithm on given testset, i.e. estimate all the ratings
in the given testset.
Args:
testset: A test set, as returned by a :ref:`cross-validation
                iterator<use_cross_validation_iterators>` or by the
:meth:`build_testset() <surprise.Trainset.build_testset>`
method.
            verbose(bool): Whether to print details for each prediction.
Default is False.
Returns:
A list of :class:`Prediction\\
<surprise.prediction_algorithms.predictions.Prediction>` objects
that contains all the estimated ratings.
"""
# The ratings are translated back to their original scale.
predictions = [
self.predict(uid, iid, r_ui_trans, verbose=verbose)
for (uid, iid, r_ui_trans) in testset
]
return predictions
def compute_baselines(self):
"""Compute users and items baselines.
The way baselines are computed depends on the ``bsl_options`` parameter
passed at the creation of the algorithm (see
:ref:`baseline_estimates_configuration`).
This method is only relevant for algorithms using :func:`Pearson
baseline similarity<surprise.similarities.pearson_baseline>` or the
:class:`BaselineOnly
<surprise.prediction_algorithms.baseline_only.BaselineOnly>` algorithm.
Returns:
A tuple ``(bu, bi)``, which are users and items baselines."""
        # First off, if this method has already been called before on the same
        # trainset, then just return. Indeed, compute_baselines may be called
        # more than once, for example when a similarity metric (e.g.
# pearson_baseline) uses baseline estimates.
if self.bu is not None:
return self.bu, self.bi
method = dict(als=baseline_als, sgd=baseline_sgd)
method_name = self.bsl_options.get("method", "als")
try:
if getattr(self, "verbose", False):
print("Estimating biases using", method_name + "...")
self.bu, self.bi = method[method_name](self)
return self.bu, self.bi
except KeyError:
raise ValueError(
"Invalid method "
+ method_name
+ " for baseline computation."
+ " Available methods are als and sgd."
)
def compute_similarities(self):
"""Build the similarity matrix.
The way the similarity matrix is computed depends on the
``sim_options`` parameter passed at the creation of the algorithm (see
:ref:`similarity_measures_configuration`).
This method is only relevant for algorithms using a similarity measure,
such as the :ref:`k-NN algorithms <pred_package_knn_inpired>`.
Returns:
The similarity matrix."""
construction_func = {
"cosine": sims.cosine,
"msd": sims.msd,
"pearson": sims.pearson,
"pearson_baseline": sims.pearson_baseline,
}
if self.sim_options["user_based"]:
n_x, yr = self.trainset.n_users, self.trainset.ir
else:
n_x, yr = self.trainset.n_items, self.trainset.ur
min_support = self.sim_options.get("min_support", 1)
args = [n_x, yr, min_support]
name = self.sim_options.get("name", "msd").lower()
if name == "pearson_baseline":
shrinkage = self.sim_options.get("shrinkage", 100)
bu, bi = self.compute_baselines()
if self.sim_options["user_based"]:
bx, by = bu, bi
else:
bx, by = bi, bu
args += [self.trainset.global_mean, bx, by, shrinkage]
try:
if getattr(self, "verbose", False):
print(f"Computing the {name} similarity matrix...")
sim = construction_func[name](*args)
if getattr(self, "verbose", False):
print("Done computing similarity matrix.")
return sim
except KeyError:
raise NameError(
"Wrong sim name "
+ name
+ ". Allowed values "
+ "are "
+ ", ".join(construction_func.keys())
+ "."
)
def get_neighbors(self, iid, k):
"""Return the ``k`` nearest neighbors of ``iid``, which is the inner id
of a user or an item, depending on the ``user_based`` field of
``sim_options`` (see :ref:`similarity_measures_configuration`).
As the similarities are computed on the basis of a similarity measure,
this method is only relevant for algorithms using a similarity measure,
such as the :ref:`k-NN algorithms <pred_package_knn_inpired>`.
For a usage example, see the :ref:`FAQ <get_k_nearest_neighbors>`.
Args:
iid(int): The (inner) id of the user (or item) for which we want
the nearest neighbors. See :ref:`this note<raw_inner_note>`.
k(int): The number of neighbors to retrieve.
Returns:
The list of the ``k`` (inner) ids of the closest users (or items)
to ``iid``.
"""
if self.sim_options["user_based"]:
all_instances = self.trainset.all_users
else:
all_instances = self.trainset.all_items
others = [(x, self.sim[iid, x]) for x in all_instances() if x != iid]
others = heapq.nlargest(k, others, key=lambda tple: tple[1])
k_nearest_neighbors = [j for (j, _) in others]
return k_nearest_neighbors
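# Sketch of get_neighbors(), assuming an item-based algorithm already fitted
# (e.g. the KNNWithMeans sketch in knns): ids must be *inner* ids, so convert
# at the boundaries:
#
#   inner_iid = trainset.to_inner_iid("302")
#   neighbors = algo.get_neighbors(inner_iid, k=10)
#   raw_neighbors = [trainset.to_raw_iid(i) for i in neighbors]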
<s> """
The :mod:`surprise.prediction_algorithms.predictions` module defines the
:class:`Prediction` named tuple and the :class:`PredictionImpossible`
exception.
"""
from collections import namedtuple
class PredictionImpossible(Exception):
r"""Exception raised when a prediction is impossible.
When raised, the estimation :math:`\\hat{r}_{ui}` is set to the global mean
of all ratings :math:`\\mu`.
"""
pass
class Prediction(namedtuple("Prediction", ["uid", "iid", "r_ui", "est", "details"])):
"""A named tuple for storing the results of a prediction.
It's wrapped in a class, but only for documentation and printing purposes.
Args:
uid: The (raw) user id. See :ref:`this note<raw_inner_note>`.
iid: The (raw) item id. See :ref:`this note<raw_inner_note>`.
r_ui(float): The true rating :math:`r_{ui}`.
est(float): The estimated rating :math:`\\\\hat{r}_{ui}`.
details (dict): Stores additional details about the prediction that
might be useful for later analysis.
"""
__slots__ = () # for memory saving purpose.
def __str__(self):
s = f"user: {self.uid:<10} "
s += f"item: {self.iid:<10} "
if self.r_ui is not None:
s += f"r_ui = {self.r_ui:1.2f} "
else:
s += "r_ui = None "
s += f"est = {self.est:1.2f} "
s += str(self.details)
return s
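# Sketch: being a namedtuple, a Prediction supports both field access and
# destructuring; `algo` is any fitted algorithm from the sketches above:
#
#   uid, iid, r_ui, est, details = algo.predict("196", "302", r_ui=4.0)
#   if details["was_impossible"]:
#       print("fell back to default_prediction():", details["reason"])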
<s> from .search import GridSearchCV, RandomizedSearchCV
from .split import (
KFold,
LeaveOneOut,
PredefinedKFold,
RepeatedKFold,
ShuffleSplit,
train_test_split,
)
from .validation import cross_validate
__all__ = [
"KFold",
"ShuffleSplit",
"train_test_split",
"RepeatedKFold",
"LeaveOneOut",
"PredefinedKFold",
"cross_validate",
"GridSearchCV",
"RandomizedSearchCV",
]
<s> """
The :mod:`model_selection.split<surprise.model_selection.split>` module
contains various cross-validation iterators. Design and tools are inspired from
the mighty scikit learn.
The available iterators are:
.. autosummary::
:nosignatures:
KFold
RepeatedKFold
ShuffleSplit
LeaveOneOut
PredefinedKFold
This module also contains a function for splitting datasets into trainset and
testset:
.. autosummary::
:nosignatures:
train_test_split
"""
import numbers
from collections import defaultdict
from itertools import chain
from math import ceil, floor
import numpy as np
from ..utils import get_rng
def get_cv(cv):
"""Return a 'validated' CV iterator."""
if cv is None:
return KFold(n_splits=5)
if isinstance(cv, numbers.Integral):
return KFold(n_splits=cv)
if hasattr(cv, "split") and not isinstance(cv, str):
        return cv  # strings also have a split() method, hence the explicit isinstance check
raise ValueError(
"Wrong CV object. Expecting None, an int or CV iterator, "
"got a {}".format(type(cv))
)
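# Minimal sketch of how get_cv normalizes its argument (used by the
# cross_validate and search helpers):
#
#   get_cv(None)                             # KFold(n_splits=5)
#   get_cv(3)                                # KFold(n_splits=3)
#   get_cv(KFold(n_splits=4, shuffle=False)) # any object with split() is passed through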
class KFold:
"""A basic cross-validation iterator.
Each fold is used once as a testset while the k - 1 remaining folds are
used for training.
See an example in the :ref:`User Guide <use_cross_validation_iterators>`.
Args:
n_splits(int): The number of folds.
random_state(int, RandomState instance from numpy, or ``None``):
Determines the RNG that will be used for determining the folds. If
int, ``random_state`` will be used as a seed for a new RNG. This is
useful to get the same splits over multiple calls to ``split()``.
If RandomState instance, this same instance is used as RNG. If
``None``, the current RNG from numpy is used. ``random_state`` is
only used if ``shuffle`` is ``True``. Default is ``None``.
shuffle(bool): Whether to shuffle the ratings in the ``data`` parameter
of the ``split()`` method. Shuffling is not done in-place. Default
is ``True``.
"""
def __init__(self, n_splits=5, random_state=None, shuffle=True):
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = random_state
def split(self, data):
"""Generator function to iterate over trainsets and testsets.
Args:
data(:obj:`Dataset<surprise.dataset.Dataset>`): The data containing
ratings that will be divided into trainsets and testsets.
Yields:
tuple of (trainset, testset)
"""
        if self.n_splits > len(data.raw_ratings) or self.n_splits < 2:
            raise ValueError(
                "Incorrect value for n_splits={}. "
                "Must be >=2 and less than the number "
                "of ratings".format(self.n_splits)
            )
# We use indices to avoid shuffling the original data.raw_ratings list.
indices = np.arange(len(data.raw_ratings))
if self.shuffle:
get_rng(self.random_state).shuffle(indices)
start, stop = 0, 0
for fold_i in range(self.n_splits):
start = stop
stop += len(indices) // self.n_splits
if fold_i < len(indices) % self.n_splits:
stop += 1
raw_trainset = [
data.raw_ratings[i] for i in chain(indices[:start], indices[stop:])
]
raw_testset = [data.raw_ratings[i] for i in indices[start:stop]]
trainset = data.construct_trainset(raw_trainset)
testset = data.construct_testset(raw_testset)
yield trainset, testset
def get_n_folds(self): |