<s> import argparse
import sys
import os
import subprocess
INSTALL = 'install'
LINUXINSTALL = 'linuxinstall'
FE_MIGRATE = 'migrateappfe'
LAUNCH_KAFKA = 'launchkafkaconsumer'
RUN_LOCAL_MLAC_PIPELINE = 'runpipelinelocal'
BUILD_MLAC_CONTAINER = 'buildmlaccontainerlocal'
CONVERT_MODEL = 'convertmodel'
START_MLFLOW = 'mlflow'
COMMON_SERVICE = 'service'
TRAINING = 'training'
TRAINING_AWS = 'trainingonaws'
TRAINING_DISTRIBUTED = 'distributedtraining'
START_APPF = 'appfe'
ONLINE_TRAINING = 'onlinetraining'
TEXT_SUMMARIZATION = 'textsummarization'
GENERATE_MLAC = 'generatemlac'
AWS_TRAINING = 'awstraining'
LLAMA_7B_TUNING = 'llama7btuning'
LLM_PROMPT = 'llmprompt'
LLM_TUNING = 'llmtuning'
LLM_PUBLISH = 'llmpublish'
LLM_BENCHMARKING = 'llmbenchmarking'
TELEMETRY_PUSH = 'pushtelemetry'
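# Hedged usage sketch (not part of the original source): the constants above are the values
# accepted by the -m/--module flag parsed in the __main__ block at the bottom of this file.
# Assuming the script is saved as aion.py, typical invocations would look like:
#   python aion.py -m training -c /path/to/config.json
#   python aion.py -m appfe -ip 127.0.0.1 -p 8008
#   python aion.py -m llmprompt -cc cloud_config.json -i <instanceid> -pf prompt.txt
# The flags each module actually needs follow the corresponding dispatch branch below.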
def aion_aws_training(confFile):
from hyperscalers.aion_aws_training import awsTraining
status = awsTraining(confFile)
print(status)
def aion_training(confFile):
from bin.aion_pipeline import aion_train_model
status = aion_train_model(confFile)
print(status)
def aion_awstraining(config_file):
from hyperscalers import aws_instance
print(config_file)
aws_instance.training(config_file)
def aion_generatemlac(ConfFile):
from bin.aion_mlac import generate_mlac_code
status = generate_mlac_code(ConfFile)
print(status)
def aion_textsummarization(confFile):
from bin.aion_text_summarizer import aion_textsummary
status = aion_textsummary(confFile)
print(status)
def aion_oltraining(confFile):
from bin.aion_online_pipeline import aion_ot_train_model
status = aion_ot_train_model(confFile)
print(status)
def do_telemetry_sync():
from appbe.telemetry import SyncTelemetry
SyncTelemetry()
def aion_llm_publish(cloudconfig,instanceid,hypervisor,model,usecaseid,region,image):
from llm.llm_inference import LLM_publish
LLM_publish(cloudconfig,instanceid,hypervisor,model,usecaseid,region,image)
def aion_migratefe(operation):
import os
import sys
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'appfe.ux.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
argi=[]
argi.append(os.path.abspath(__file__))
argi.append(operation)
execute_from_command_line(argi)
def aion_appfe(url,port):
#manage_location = os.path.join(os.path.dirname(os.path.abspath(__file__)),'manage.py')
#subprocess.check_call([sys.executable,manage_location, "runserver","%s:%s"%(url,port)])
import os
import sys
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'appfe.ux.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
argi=[]
argi.append(os.path.abspath(__file__))
argi.append('runaion')
argi.append("%s:%s"%(url,port))
execute_from_command_line(argi)
def aion_linux_install(version):
from install import linux_dependencies
linux_dependencies.process(version)
def aion_install(version):
from install import dependencies
dependencies.process(version)
def aion_service(ip,port,username,password):
from bin.aion_service import start_server
start_server(ip,port,username,password)
def aion_distributedLearning(confFile):
from distributed_learning import learning
learning.training(confFile)
def aion_launchkafkaconsumer():
from mlops import kafka_consumer
kafka_consumer.launch_kafka_consumer()
def aion_start_mlflow():
from appbe.dataPath import DEPLOY_LOCATION
import platform
import shutil
from os.path import expanduser
mlflowpath = os.path.normpath(os.path.join(os.path.dirname(__file__),'..','..','..','Scripts','mlflow.exe'))
print(mlflowpath)
home = expanduser("~")
if platform.system() == 'Windows':
DEPLOY_LOCATION = os.path.join(DEPLOY_LOCATION,'mlruns')
outputStr = subprocess.Popen([sys.executable, mlflowpath,"ui", "--backend-store-uri","file:///"+DEPLOY_LOCATION])
else:
DEPLOY_LOCATION = os.path.join(DEPLOY_LOCATION,'mlruns')
subprocess.check_call(['mlflow',"ui","-h","0.0.0.0","--backend-store-uri","file:///"+DEPLOY_LOCATION])
def aion_model_conversion(config_file):
from conversions import model_convertions
model_convertions.convert(config_file)
def aion_model_buildMLaCContainer(config):
from mlops import build_container
build_container.local_docker_build(config)
def aion_model_runpipelinelocal(config):
from mlops import local_pipeline
local_pipeline.run_pipeline(config)
def aion_llm_tuning(config):
from llm.llm_tuning import run
run(config)
def aion_llm_prompt(cloudconfig,instanceid,prompt):
from llm.aws_instance_api import LLM_predict
LLM_predict(cloudconfig,instanceid,prompt)
def llm_bench_marking(hypervisor,instanceid,model,usecaseid,eval):
print(eval)
from llm.bench_marking import bench_mark
bench_mark(hypervisor,instanceid,model,usecaseid,eval)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--configPath', help='Config File Path')
parser.add_argument('-i', '--instanceid', help='instanceid')
parser.add_argument('-hv', '--hypervisor', help='hypervisor')
parser.add_argument('-md', '--model', help='model')
parser.add_argument('-uc', '--usecase', help='usecase')
parser.add_argument('-cc', '--cloudConfigPath', help='Cloud Config File Path')
parser.add_argument('-m', '--module', help='MODULE=TRAINING, APPFE, ONLINETRAINING,DISTRIBUTEDTRAINING')
parser.add_argument('-ip', '--ipaddress', help='URL applicable only for APPFE method ')
parser.add_argument('-p', '--port', help='APP Front End Port applicable only for APPFE method ')
parser.add_argument('-ac', '--appfecommand', help='APP Front End Command ')
parser.add_argument('-un','--username', help="USERNAME")
parser.add_argument('-passw','--password', help="PASSWORD")
parser.add_argument('-j', '--jsoninput', help='JSON Input')
parser.add_argument('-v', '--version', help='Installer Version')
parser.add_argument('-pf', '--prompt', help='Prompt File')
parser.add_argument('-r', '--region', help='REGION NAME')
parser.add_argument('-im', '--image', help='IMAGE NAME')
parser.add_argument('-e', '--eval', help='evaluation for code or doc', default='doc')
args = parser.parse_args()
if args.module.lower() == TRAINING:
aion_training(args.configPath)
elif args.module.lower() == TRAINING_AWS:
aion_awstraining(args.configPath)
elif args.module.lower() == TRAINING_DISTRIBUTED:
aion_distributedLearning(args.configPath)
elif args.module.lower() == START_APPF:
aion_appfe(args.ipaddress,args.port)
elif args.module.lower() == ONLINE_TRAINING:
aion_oltraining(args.configPath)
elif args.module.lower() == TEXT_SUMMARIZATION:
aion_textsummarization(args.configPath)
elif args.module.lower() == GENERATE_MLAC:
aion_generatemlac(args.configPath)
elif args.module.lower() == COMMON_SERVICE:
aion_service(args.ipaddress,args.port,args.username,args.password)
elif args.module.lower() == START_MLFLOW:
aion_start_mlflow()
elif args.module.lower() == CONVERT_MODEL:
aion_model_conversion(args.configPath)
elif args.module.lower() == BUILD_MLAC_CONTAINER:
aion_model_buildMLaCContainer(args.jsoninput)
elif args.module.lower() == RUN_LOCAL_MLAC_PIPELINE:
aion_model_runpipelinelocal(args.jsoninput)
elif args.module.lower() == LAUNCH_KAFKA:
aion_launchkafkaconsumer()
elif args.module.lower() == INSTALL:
aion_install(args.version)
elif args.module.lower() == LINUXINSTALL:
aion_linux_install(args.version)
elif args.module.lower() == FE_MIGRATE:
aion_migratefe('makemigrations')
aion_migratefe('migrate')
elif args.module.lower() == AWS_TRAINING:
aion_aws_training(args.configPath)
elif args.module.lower() == LLAMA_7B_TUNING:
aion_llm_tuning(args.configPath)
elif args.module.lower() == LLM_TUNING:
aion_llm_tuning(args.configPath)
elif args.module.lower() == LLM_PROMPT:
aion_llm_prompt(args.cloudConfigPath,args.instanceid,args.prompt)
elif args.module.lower() == LLM_PUBLISH:
aion_llm_publish(args.cloudConfigPath,args.instanceid,args.hypervisor,args.model,args.usecase,args.region,args.image)
elif args.module.lower() == LLM_BENCHMARKING:
llm_bench_marking(args.hypervisor,args.instanceid,args.model,args.usecase, args.eval)
elif args.module.lower() == TELEMETRY_PUSH:
do_telemetry_sync()
<s> import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))
from .bin.aion_pipeline import aion_train_model
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import sys
import json
import datetime, time, timeit
import argparse
import logging
logging.getLogger('tensorflow').disabled = True
import math
import shutil
import re
from datetime import datetime as dt
import warnings
from config_manager.pipeline_config import AionConfigManager
import pandas as pd
import numpy as np
import sklearn
import string
from records import pushrecords
import logging
from pathlib import Path
from pytz import timezone
from config_manager.config_gen import code_configure
import joblib
from sklearn.model_selection import train_test_split
from config_manager.check_config import config_validate
from utils.file_ops import save_csv_compressed,save_csv,save_chromadb
LOG_FILE_NAME = 'model_training_logs.log'
if 'AION' in sys.modules:
try:
from appbe.app_config import DEBUG_ENABLED
except:
DEBUG_ENABLED = False
else:
DEBUG_ENABLED = True
def getversion():
configFolder = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','config')
version = 'NA'
for file in os.listdir(configFolder):
if file.endswith(".var"):
version = file.rsplit('.', 1)
version = version[0]
break
return version
AION_VERSION = getversion()
def pushRecordForTraining():
try:
status,msg = pushrecords.enterRecord(AION_VERSION)
except Exception as e:
print("Exception", e)
status = False
msg = str(e)
return status,msg
def mlflowSetPath(path,experimentname):
import mlflow
url = "file:" + str(Path(path).parent.parent) + "/mlruns"
mlflow.set_tracking_uri(url)
mlflow.set_experiment(str(experimentname))
def set_log_handler( basic, mode='w'):
deploy_loc = Path(basic.get('deployLocation'))
log_file_parent = deploy_loc/basic['modelName']/basic['modelVersion']/'log'
log_file_parent.mkdir(parents=True, exist_ok=True)
log_file = log_file_parent/LOG_FILE_NAME
filehandler = logging.FileHandler(log_file, mode,'utf-8')
formatter = logging.Formatter('%(message)s')
filehandler.setFormatter(formatter)
log = logging.getLogger('eion')
log.propagate = False
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
log.removeHandler(hdlr)
log.addHandler(filehandler)
log.setLevel(logging.INFO)
return log
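# Hedged usage sketch (illustrative only): set_log_handler expects the 'basic' section of the
# training configuration; only the keys read above are required. The values here are hypothetical.
#   example_basic = {'deployLocation': '/tmp/aion', 'modelName': 'usecase_1', 'modelVersion': '1'}
#   log = set_log_handler(example_basic)
#   # log output goes to <deployLocation>/<modelName>/<modelVersion>/log/model_training_logs.log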
class server():
def __init__(self):
self.response = None
self.features=[]
self.mFeatures=[]
self.emptyFeatures=[]
self.textFeatures=[]
self.vectorizerFeatures=[]
self.wordToNumericFeatures=[]
self.profilerAction = []
self.targetType = ''
self.matrix1='{'
self.matrix2='{'
self.matrix='{'
self.trainmatrix='{'
self.numericalFeatures=[]
self.nonNumericFeatures=[]
self.similarGroups=[]
self.dfcols=0
self.dfrows=0
self.method = 'NA'
self.pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
self.modelSelTopFeatures=[]
self.topFeatures=[]
self.allFeatures=[]
def startScriptExecution(self, config_obj, codeConfigure, log):
oldStdout = sys.stdout
model_training_details = ''
model_tried=''
learner_type = ''
topics = {}
pred_filename = ''
numericContinuousFeatures=''
discreteFeatures=''
sessonal_freq = ''
additional_regressors = ''
threshold=-1
targetColumn = ''
numericalFeatures =''
nonNumericFeatures=''
categoricalFeatures=''
dataFolderLocation = ''
featureReduction = 'False'
original_data_file = ''
normalizer_pickle_file = ''
pcaModel_pickle_file = ''
bpca_features= []
apca_features = []
lag_order = 1
profiled_data_file = ''
trained_data_file = ''
predicted_data_file=''
dictDiffCount={}
cleaning_kwargs = {}
grouperbyjson = ''
rowfilterexpression=''
featureEngineeringSelector = 'false'
conversion_method = ''
params={}
loss_matrix='binary_crossentropy'
optimizer='Nadam'
numericToLabel_json='[]'
preprocessing_pipe=''
firstDocFeature = ''
secondDocFeature = ''
padding_length = 30
pipe = None
scalertransformationFile=None
column_merge_flag = False
merge_columns = []
score = 0
profilerObj = None
imageconfig=''
labelMaps={}
featureDataShape=[]
normFeatures = []
preprocess_out_columns = []
preprocess_pipe = None
label_encoder = None
unpreprocessed_columns = []
import pickle
iterName,iterVersion,dataLocation,deployLocation,delimiter,textqualifier = config_obj.getAIONLocationSettings()
inlierLabels=config_obj.getEionInliers()
scoreParam = config_obj.getScoringCreteria()
noofforecasts = config_obj.getNumberofForecasts()
datetimeFeature,indexFeature,modelFeatures=config_obj.getFeatures()
filter_expression = config_obj.getFilterExpression()
refined_filter_expression = ""
sa_images = []
model_tried = ''
deploy_config = {}
iterName = iterName.replace(" ", "_")
deployFolder = deployLocation
usecaseLocation,deployLocation,dataFolderLocation,imageFolderLocation,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,logFileName,outputjsonFile,reduction_data_file = config_obj.createDeploymentFolders(deployFolder,iterName,iterVersion)
outputLocation=deployLocation
mlflowSetPath(deployLocation,iterName+'_'+iterVersion)
# mlflowSetPath shut down the logger, so set again
set_log_handler( config_obj.basic, mode='a')
xtrain=pd.DataFrame()
xtest=pd.DataFrame()
log.info('Status:-|... AION Training Configuration started')
startTime = timeit.default_timer()
try:
output = {'bestModel': '', 'bestScore': 0, 'bestParams': {}}
problem_type,targetFeature,profiler_status,selector_status,learner_status,deeplearner_status,timeseriesStatus,textsummarizationStatus,survival_analysis_status,textSimilarityStatus,inputDriftStatus,outputDriftStatus,recommenderStatus,visualizationstatus,deploy_status,associationRuleStatus,imageClassificationStatus,forecastingStatus, objectDetectionStatus,stateTransitionStatus, similarityIdentificationStatus,contextualSearchStatus,anomalyDetectionStatus = config_obj.getModulesDetails()
status, error_id, msg = config_obj.validate_config()
if not status:
if error_id == 'fasttext':
raise ValueError(msg)
VideoProcessing = False
if(problem_type.lower() in ['classification','regression']):
if(targetFeature == ''):
output = {"status":"FAIL","message":"Target Feature is Must for Classification and Regression Problem Type"}
return output
from transformations.dataReader import dataReader
objData = dataReader()
DataIsFolder = False
folderdetails = config_obj.getFolderSettings()
if os.path.isfile(dataLocation):
log.info('Status:-|... AION Loading Data')
dataFrame = objData.csvTodf(dataLocation,delimiter,textqualifier)
status,msg = save_csv_compressed(dataFrame,original_data_file)
if not status:
log.info('CSV File Error: '+str(msg))
elif os.path.isdir(dataLocation):
if problem_type.lower() == 'summarization':
from document_summarizer import summarize
keywords, pretrained_type, embedding_sz = summarize.get_params()
dataFrame = summarize.to_dataframe(dataLocation,keywords, deploy_loc, pretrained_type, embedding_sz)
problem_type = 'classification'
targetFeature = 'label'
scoreParam = 'Accuracy'
elif folderdetails['fileType'].lower() == 'document':
dataFrame, error = objData.documentsTodf(dataLocation, folderdetails['labelDataFile'])
if error:
log.info(error)
elif folderdetails['fileType'].lower() == 'object':
testPercentage = config_obj.getAIONTestTrainPercentage() #Unnati
intermediateLocation = os.path.join(deployLocation,'intermediate')
os.mkdir(intermediateLocation)
AugEnabled,keepAugImages,operations,augConf = config_obj.getEionImageAugmentationConfiguration()
dataFrame, n_class = objData.createTFRecord(dataLocation, intermediateLocation, folderdetails['labelDataFile'], testPercentage,AugEnabled,keepAugImages,operations, "objectdetection",augConf) #Unnati
DataIsFolder = True
else:
datafilelocation = os.path.join(dataLocation,folderdetails['labelDataFile'])
dataFrame = objData.csvTodf(datafilelocation,delimiter,textqualifier)
DataIsFolder = True
if textSimilarityStatus or similarityIdentificationStatus or contextualSearchStatus:
similaritydf = dataFrame
filter = config_obj.getfilter()
if filter != 'NA':
dataFrame,rowfilterexpression = objData.rowsfilter(filter,dataFrame)
timegrouper = config_obj.gettimegrouper()
grouping = config_obj.getgrouper()
if grouping != 'NA':
dataFrame,grouperbyjson = objData.grouping(grouping,dataFrame)
elif timegrouper != 'NA':
dataFrame,grouperbyjson = objData.timeGrouping(timegrouper,dataFrame)
if timeseriesStatus or anomalyDetectionStatus:
from utils.validate_inputs import dataGarbageValue
status,msg = dataGarbageValue(dataFrame,datetimeFeature)
if status.lower() == 'error':
raise ValueError(msg)
if not DataIsFolder:
if timeseriesStatus:
if(modelFeatures != 'NA' and datetimeFeature != ''):
if datetimeFeature:
if isinstance(datetimeFeature, list): #to handle if time series having multiple time column
unpreprocessed_columns = unpreprocessed_columns + datetimeFeature
else:
unpreprocessed_columns = unpreprocessed_columns + datetimeFeature.split(',')
if datetimeFeature not in modelFeatures:
modelFeatures = modelFeatures+','+datetimeFeature
dataFrame = objData.removeFeatures(dataFrame,'NA',indexFeature,modelFeatures,targetFeature)
elif survival_analysis_status or anomalyDetectionStatus:
if(modelFeatures != 'NA'):
if datetimeFeature != 'NA' and datetimeFeature != '':
unpreprocessed_columns = unpreprocessed_columns + datetimeFeature.split(',')
if datetimeFeature not in modelFeatures:
modelFeatures = modelFeatures+','+datetimeFeature
dataFrame = objData.removeFeatures(dataFrame,'NA',indexFeature,modelFeatures,targetFeature)
else:
dataFrame = objData.removeFeatures(dataFrame,datetimeFeature,indexFeature,modelFeatures,targetFeature)
log.info('\\n-------> First Ten Rows of Input Data: ')
log.info(dataFrame.head(10))
self.dfrows=dataFrame.shape[0]
self.dfcols=dataFrame.shape[1]
log.info('\\n-------> Rows: '+str(self.dfrows))
log.info('\\n-------> Columns: '+str(self.dfcols))
topFeatures=[]
profilerObj = None
normalizer=None
dataLoadTime = timeit.default_timer() - startTime
log.info('-------> COMPUTING: Total dataLoadTime time(sec) :'+str(dataLoadTime))
if timeseriesStatus:
if datetimeFeature != 'NA' and datetimeFeature != '':
preproces_config = config_obj.basic.get('preprocessing',{}).get('timeSeriesForecasting',{})
if preproces_config:
from transformations.preprocess import timeSeries as ts_preprocess
preprocess_obj = ts_preprocess( preproces_config,datetimeFeature, log)
dataFrame = preprocess_obj.run( dataFrame)
log.info('-------> Input dataFrame(5 Rows) after preprocessing: ')
log.info(dataFrame.head(5))
deploy_config['preprocess'] = {}
deploy_config['preprocess']['code'] = preprocess_obj.get_code()
if profiler_status:
log.info('\\n================== Data Profiler has started ==================')
log.info('Status:-|... AION feature transformation started')
from transformations.dataProfiler import profiler as dataProfiler
dp_mlstart = time.time()
profilerJson = config_obj.getEionProfilerConfigurarion()
log.info('-------> Input dataFrame(5 Rows): ')
log.info(dataFrame.head(5))
log.info('-------> DataFrame Shape (Row,Columns): '+str(dataFrame.shape))
testPercentage = config_obj.getAIONTestTrainPercentage() #Unnati
if DataIsFolder:
if folderdetails['type'].lower() != 'objectdetection':
profilerObj = dataProfiler(dataFrame)
topFeatures,VideoProcessing,tfrecord_directory = profilerObj.folderPreprocessing(dataLocation,folderdetails,deployLocation)
elif textSimilarityStatus:
firstDocFeature = config_obj.getFirstDocumentFeature()
secondDocFeature = config_obj.getSecondDocumentFeature()
profilerObj = dataProfiler(dataFrame,targetFeature, data_path=dataFolderLocation)
dataFrame,pipe,targetColumn,topFeatures = profilerObj.textSimilarityStartProfiler(firstDocFeature,secondDocFeature)
elif recommenderStatus:
profilerObj = dataProfiler(dataFrame)
dataFrame = profilerObj.recommenderStartProfiler(modelFeatures)
else:
if deeplearner_status or learner_status:
if (problem_type.lower() != 'clustering') and (problem_type.lower() != 'topicmodelling'):
if targetFeature != '':
try:
biasingDetail = config_obj.getDebiasingDetail()
if len(biasingDetail) > 0:
if biasingDetail['FeatureName'] != 'None':
protected_feature = biasingDetail['FeatureName']
privileged_className = biasingDetail['ClassName']
target_feature = biasingDetail['TargetFeature']
algorithm = biasingDetail['Algorithm']
from debiasing.DebiasingManager import DebiasingManager
mgrObj = DebiasingManager()
log.info('Status:-|... Debiasing transformation started')
transf_dataFrame = mgrObj.Bias_Mitigate(dataFrame, protected_feature, privileged_className, target_feature, algorithm)
log.info('Status:-|... Debiasing transformation completed')
dataFrame = transf_dataFrame
except Exception as e:
print(e)
pass
# ---------------------------------------------- ----------------------------------------------
targetData = dataFrame[targetFeature]
featureData = dataFrame[dataFrame.columns.difference([targetFeature])]
testPercentage = config_obj.getAIONTestTrainPercentage() #Unnati
xtrain,ytrain,xtest,ytest = self.split_into_train_test_data(featureData,targetData,testPercentage,log,problem_type.lower())
xtrain.reset_index(drop=True,inplace=True)
ytrain.reset_index(drop=True,inplace=True)
xtest.reset_index(drop=True,inplace=True)
ytest.reset_index(drop=True,inplace=True)
dataFrame = xtrain
dataFrame[targetFeature] = ytrain
encode_target_problems = ['classification','anomalyDetection', 'timeSeriesAnomalyDetection'] #task 11997
if problem_type == 'survivalAnalysis' and dataFrame[targetFeature].nunique() > 1:
encode_target_problems.append('survivalAnalysis')
if timeseriesStatus: #task 12627 calling data profiler without target feature specified separately (i.e) profiling is done for model features along with target features
profilerObj = dataProfiler(dataFrame, config=profilerJson, keep_unprocessed = unpreprocessed_columns.copy(), data_path=dataFolderLocation)
else:
profilerObj = dataProfiler(dataFrame, target=targetFeature, encode_target= problem_type in encode_target_problems, config=profilerJson, keep_unprocessed = unpreprocessed_columns.copy(), data_path=dataFolderLocation) #task 12627
dataFrame, preprocess_pipe, label_encoder = profilerObj.transform()
preprocess_out_columns = dataFrame.columns.tolist()
if not timeseriesStatus: #task 12627 preprocess_out_columns goes as output_columns in target folder script/input_profiler.py, It should contain the target feature also as it is what is used for forecasting
if targetFeature in preprocess_out_columns:
preprocess_out_columns.remove(targetFeature)
for x in unpreprocessed_columns:
preprocess_out_columns.remove(x)
if label_encoder:
joblib.dump(label_encoder, Path(deployLocation)/'model'/'label_encoder.pkl')
labelMaps = dict(zip(label_encoder.classes_, label_encoder.transform(label_encoder.classes_)))
codeConfigure.update_config('train_features',list(profilerObj.train_features_type.keys()))
codeConfigure.update_config('text_features',profilerObj.text_feature)
self.textFeatures = profilerObj.text_feature
deploy_config['profiler'] = {}
deploy_config['profiler']['input_features'] = list(profilerObj.train_features_type.keys())
deploy_config['profiler']['output_features'] = preprocess_out_columns
deploy_config['profiler']['input_features_type'] = profilerObj.train_features_type
deploy_config['profiler']['word2num_features'] = profilerObj.wordToNumericFeatures
deploy_config['profiler']['unpreprocessed_columns'] = unpreprocessed_columns
deploy_config['profiler']['force_numeric_conv'] = profilerObj.force_numeric_conv
if self.textFeatures:
deploy_config['profiler']['conversion_method'] = config_obj.get_conversion_method()
if anomalyDetectionStatus and datetimeFeature != 'NA' and datetimeFeature != '':
if unpreprocessed_columns:
dataFrame.set_index( unpreprocessed_columns[0], inplace=True)
log.info('-------> Data Frame Post Data Profiling(5 Rows): ')
log.info(dataFrame.head(5))
if not xtest.empty:
if targetFeature != '':
non_null_index = ytest.notna()
ytest = ytest[non_null_index]
xtest = xtest[non_null_index]
if profilerObj.force_numeric_conv:
xtest[ profilerObj.force_numeric_conv] = xtest[profilerObj.force_numeric_conv].apply(pd.to_numeric,errors='coerce')
xtest = xtest.astype(profilerObj.train_features_type)
if unpreprocessed_columns:
xtest_unprocessed = xtest[unpreprocessed_columns]
xtest = preprocess_pipe.transform(xtest)
if not isinstance(xtest, np.ndarray):
xtest = xtest.toarray()
xtest = pd.DataFrame(xtest, columns=preprocess_out_columns)
if unpreprocessed_columns:
xtest[unpreprocessed_columns] = xtest_unprocessed
if survival_analysis_status:
xtest = xtest.astype({x:'float' for x in unpreprocessed_columns})
xtrain = xtrain.astype({x:'float' for x in unpreprocessed_columns})
#task 11997 removed setting datetime column as index of dataframe code as it is already done before
if label_encoder:
ytest = label_encoder.transform(ytest)
if preprocess_pipe:
if self.textFeatures:
from text.textProfiler import reset_pretrained_model
reset_pretrained_model(preprocess_pipe) # pickle is not possible for fasttext model ( binary)
joblib.dump(preprocess_pipe, Path(deployLocation)/'model'/'preprocess_pipe.pkl')
self.features=topFeatures
if targetColumn in topFeatures:
topFeatures.remove(targetColumn)
self.topFeatures=topFeatures
if normalizer != None:
normalizer_file_path = os.path.join(deployLocation,'model','normalizer_pipe.sav')
normalizer_pickle_file = 'normalizer_pipe.sav'
pickle.dump(normalizer, open(normalizer_file_path,'wb'))
log.info('Status:-|... AION feature transformation completed')
dp_mlexecutionTime=time.time() - dp_mlstart
log.info('-------> COMPUTING: Total Data Profiling Execution Time '+str(dp_mlexecutionTime))
log.info('================== Data Profiling completed ==================\\n')
else:
datacolumns=list(dataFrame.columns)
if targetFeature in datacolumns:
datacolumns.remove(targetFeature)
if not timeseriesStatus and not anomalyDetectionStatus and not inputDriftStatus and not outputDriftStatus and not imageClassificationStatus and not associationRuleStatus and not objectDetectionStatus and not stateTransitionStatus and not textsummarizationStatus:
self.textFeatures,self.vectorizerFeatures,pipe,column_merge_flag,merge_columns = profilerObj.checkForTextClassification(dataFrame)
self.topFeatures =datacolumns
if(pipe is not None):
preprocessing_pipe = 'pppipe'+iterName+'_'+iterVersion+'.sav'
ppfilename = os.path.join(deployLocation,'model','pppipe'+iterName+'_'+iterVersion+'.sav')
pickle.dump(pipe, open(ppfilename, 'wb'))
status, msg = save_csv_compressed(dataFrame,profiled_data_file)
if not status:
log.info('CSV File Error: ' + str(msg))
if selector_status:
log.info("\\n================== Feature Selector has started ==================")
log.info("Status:-|... AION feature engineering started")
fs_mlstart = time.time()
selectorJson = config_obj.getEionSelectorConfiguration()
if self.textFeatures:
config_obj.updateFeatureSelection(selectorJson, codeConfigure, self.textFeatures)
log.info("-------> For vectorizer 'feature selection' is disabled and all the features will be used for training")
from feature_engineering.featureSelector import featureSelector
selectorObj = featureSelector()
dataFrame,targetColumn,self.topFeatures,self.modelSelTopFeatures,self.allFeatures,self.targetType,self.similarGroups,numericContinuousFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,pcaModel,bpca_features,apca_features,featureEngineeringSelector = selectorObj.startSelector(dataFrame, selectorJson,self.textFeatures,targetFeature,problem_type)
if(str(pcaModel) != 'None'):
featureReduction = 'True'
status, msg = save_csv(dataFrame,reduction_data_file)
if not status:
log.info('CSV File Error: ' + str(msg))
pcaFileName = os.path.join(deployLocation,'model','pca'+iterName+'_'+iterVersion+'.sav')
pcaModel_pickle_file = 'pca'+iterName+'_'+iterVersion+'.sav'
pickle.dump(pcaModel, open(pcaFileName, 'wb'))
if not xtest.empty:
xtest = pd.DataFrame(pcaModel.transform(xtest),columns= apca_features)
if targetColumn in self.topFeatures:
self.topFeatures.remove(targetColumn)
fs_mlexecutionTime=time.time() - fs_mlstart
log.info('-------> COMPUTING: Total Feature Selection Execution Time '+str(fs_mlexecutionTime))
log.info('================== Feature Selection completed ==================\\n')
log.info("Status:-|... AION feature engineering completed")
if deeplearner_status or learner_status:
log.info('Status:-|... AION training started')
ldp_mlstart = time.time()
balancingMethod = config_obj.getAIONDataBalancingMethod()
from learner.machinelearning import machinelearning
mlobj = machinelearning()
modelType = problem_type.lower()
targetColumn = targetFeature
if modelType == "na":
if self.targetType == 'categorical':
modelType = 'classification'
elif self.targetType == 'continuous':
modelType = 'regression'
else:
modelType='clustering'
datacolumns=list(dataFrame.columns)
if targetColumn in datacolumns:
datacolumns.remove(targetColumn)
features =datacolumns
featureData = dataFrame[features]
if(modelType == 'clustering') or (modelType == 'topicmodelling'):
xtrain = featureData
ytrain = pd.DataFrame()
xtest = featureData
ytest = pd.DataFrame()
elif (targetColumn!=''):
xtrain = dataFrame[features]
ytrain = dataFrame[targetColumn]
else:
pass
categoryCountList = []
if modelType == 'classification':
if(mlobj.checkForClassBalancing(ytrain) >= 1):
xtrain,ytrain = mlobj.ExecuteClassBalancing(xtrain,ytrain,balancingMethod)
valueCount=targetData.value_counts()
categoryCountList=valueCount.tolist()
ldp_mlexecutionTime=time.time() - ldp_mlstart
log.info('-------> COMPUTING: Total Learner data preparation Execution Time '+str(ldp_mlexecutionTime))
if learner_status:
base_model_score=0
log.info('\\n================== ML Started ==================')
log.info('-------> Memory Usage by DataFrame During Learner Status '+str(dataFrame.memory_usage(deep=True).sum()))
mlstart = time.time()
log.info('-------> Target Problem Type:'+ self.targetType)
learner_type = 'ML'
learnerJson = config_obj.getEionLearnerConfiguration()
from learner.machinelearning import machinelearning
mlobj = machinelearning()
anomalyDetectionStatus = False
anomalyMethod =config_obj.getEionanomalyModels()
if modelType.lower() == "anomalydetection" or modelType.lower() == "timeseriesanomalydetection": #task 11997
anomalyDetectionStatus = True
if anomalyDetectionStatus == True :
datacolumns=list(dataFrame.columns)
if targetColumn in datacolumns:
datacolumns.remove(targetColumn)
if datetimeFeature in datacolumns:
datacolumns.remove(datetimeFeature)
self.features = datacolumns
from learner.anomalyDetector import anomalyDetector
anomalyDetectorObj=anomalyDetector()
model_type ="anomaly_detection"
saved_model = model_type+'_'+iterName+'_'+iterVersion+'.sav'
if problem_type.lower() == "timeseriesanomalydetection": #task 11997
anomalyconfig = config_obj.getAIONTSAnomalyDetectionConfiguration()
modelType = "TimeSeriesAnomalyDetection"
else:
anomalyconfig = config_obj.getAIONAnomalyDetectionConfiguration()
testPercentage = config_obj.getAIONTestTrainPercentage()
##Multivariate feature based anomaly detection status from gui (true/false)
mv_featurebased_selection = config_obj.getMVFeaturebasedAD()
mv_featurebased_ad_status=str(mv_featurebased_selection['uniVariate'])
model,estimator,matrix,trainmatrix,score,labelMaps=anomalyDetectorObj.startanomalydetector(dataFrame,targetColumn,labelMaps,inlierLabels,learnerJson,model_type,saved_model,anomalyMethod,deployLocation,predicted_data_file,testPercentage,anomalyconfig,datetimeFeature,mv_featurebased_ad_status) #Unnati
score = 'NA'
if(self.matrix != '{'):
self.matrix += ','
self.matrix += matrix
if(self.trainmatrix != '{'):
self.trainmatrix += ','
self.trainmatrix += trainmatrix
scoreParam = 'NA'
scoredetails = f'{{"Model":"{model}","Score":"{score}"}}'
if model_tried != '':
model_tried += ','
model_tried += scoredetails
model = anomalyMethod
else:
log.info('-------> Target Problem Type:'+ self.targetType)
log.info('-------> Target Model Type:'+ modelType)
if(modelType == 'regression'):
allowedmatrix = ['mse','r2','rmse','mae']
if(scoreParam.lower() not in allowedmatrix):
scoreParam = 'mse'
if(modelType == 'classification'):
allowedmatrix = ['accuracy','recall','f1_score','precision','roc_auc']
if(scoreParam.lower() not in allowedmatrix):
scoreParam = 'accuracy'
scoreParam = scoreParam.lower()
codeConfigure.update_config('scoring_criteria',scoreParam)
modelParams,modelList = config_obj.getEionLearnerModelParams(modelType)
status,model_type,model,saved_model,matrix,trainmatrix,featureDataShape,model_tried,score,filename,self.features,threshold,pscore,rscore,self.method,loaded_model,xtrain1,ytrain1,xtest1,ytest1,topics,params=mlobj.startLearning(learnerJson,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,self.topFeatures,self.modelSelTopFeatures,self.allFeatures,self.targetType,deployLocation,iterName,iterVersion,trained_data_file,predicted_data_file,labelMaps,'MB',codeConfigure,featureEngineeringSelector,config_obj.getModelEvaluationConfig(),imageFolderLocation)
#Getting model,data for ensemble calculation
e_model=loaded_model
base_model_score=score
if(self.matrix != '{'):
self.matrix += ','
if(self.trainmatrix != '{'):
self.trainmatrix += ','
self.trainmatrix += trainmatrix
self.matrix += matrix
mlexecutionTime=time.time() - mlstart
log.info('-------> Total ML Execution Time '+str(mlexecutionTime))
log.info('================== ML Completed ==================\\n')
if deeplearner_status:
learner_type = 'DL'
log.info('Status:- |... AION DL training started')
from dlearning.deeplearning import deeplearning
dlobj = deeplearning()
from learner.machinelearning import machinelearning
mlobj = machinelearning()
log.info('\\n================== DL Started ==================')
dlstart = time.time()
deeplearnerJson = config_obj.getEionDeepLearnerConfiguration()
targetColumn = targetFeature
method = deeplearnerJson['optimizationMethod']
optimizationHyperParameter = deeplearn |
_inv[:, targetColIndx]
predout = predout.reshape(len(pred_1d),1)
#y_future.append(predout)
col = targetFeature.split(",")
pred = pd.DataFrame(index=range(0,len(predout)),columns=col)
for i in range(0, len(predout)):
pred.iloc[i] = predout[i]
predictions = pred
log.info("-------> Predictions")
log.info(predictions)
forecast_output = predictions.to_json(orient='records')
elif (model.lower() == 'mlp' or model.lower() == 'lstm'):
sfeatures.remove(datetimeFeature)
self.features = sfeatures
if len(sfeatures) == 1:
xt = xtrain[self.features].values
else:
xt = xtrain[self.features].values
with open(scalertransformationFile, 'rb') as f:
loaded_scaler_model = pickle.load(f)
f.close()
xt = xt.astype('float32')
xt = loaded_scaler_model.transform(xt)
pred_data = xt
y_future = []
for i in range(no_of_prediction):
pdata = pred_data[-lag_order:]
if model.lower() == 'mlp':
pdata = pdata.reshape((1,lag_order))
else:
pdata = pdata.reshape((1,lag_order, len(sfeatures)))
if (len(sfeatures) > 1):
pred = loaded_model.predict(pdata)
predout = loaded_scaler_model.inverse_transform(pred)
y_future.append(predout)
pred_data=np.append(pred_data,pred,axis=0)
else:
pred = loaded_model.predict(pdata)
predout = loaded_scaler_model.inverse_transform(pred)
y_future.append(predout.flatten()[-1])
pred_data = np.append(pred_data,pred)
col = targetFeature.split(",")
pred = pd.DataFrame(index=range(0,len(y_future)),columns=col)
for i in range(0, len(y_future)):
pred.iloc[i] = y_future[i]
predictions = pred
log.info("-------> Predictions")
log.info(predictions)
forecast_output = predictions.to_json(orient='records')
else:
pass
log.info('Status:-|... AION TimeSeries Forecasting completed') #task 11997
log.info("------ Forecast Prediction End -------------\\n")
log.info('================ Time Series Forecasting Completed ================\\n') #task 11997
if recommenderStatus:
log.info('\\n================ Recommender Started ================ ')
log.info('Status:-|... AION Recommender started')
learner_type = 'RecommenderSystem'
model_type = 'RecommenderSystem'
modelType = model_type
model = model_type
targetColumn=''
datacolumns=list(dataFrame.columns)
self.features=datacolumns
svd_params = config_obj.getEionRecommenderConfiguration()
from recommender.item_rating import recommendersystem
recommendersystemObj = recommendersystem(modelFeatures,svd_params)
testPercentage = config_obj.getAIONTestTrainPercentage() #Unnati
saved_model,rmatrix,score,trainingperformancematrix,model_tried = recommendersystemObj.recommender_model(dataFrame,outputLocation)
scoreParam = 'NA' #Task 11190
log.info('Status:-|... AION Recommender completed')
log.info('================ Recommender Completed ================\\n')
if textsummarizationStatus:
log.info('\\n================ text Summarization Started ================ ')
log.info('Status:-|... AION text Summarization started')
modelType = 'textsummarization'
model_type = 'textsummarization'
learner_type = 'Text Summarization'
modelName='TextSummarization'
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from scipy import spatial
model = model_type
dataLocationTS,deployLocationTS,KeyWordsTS,pathForKeywordFileTS = config_obj.getEionTextSummarizationConfig()
#print("dataLocationTS",dataLocationTS)
#print("deployLocationTS",deployLocationTS)
#print("KeyWordsTS",KeyWordsTS)
#print("pathForKeywordFileTS",pathForKeywordFileTS)
#PreTrained Model Download starts-------------------------
from appbe.dataPath import DATA_DIR
preTrainedModellocation = Path(DATA_DIR)/'PreTrainedModels'/'TextSummarization'
models = {'glove':{50:'glove.6B.50d.w2vformat.txt'}}
supported_models = [x for y in models.values() for x in y.values()]
modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextSummarization'
Path(modelsPath).mkdir(parents=True, exist_ok=True)
p = Path(modelsPath).glob('**/*')
modelsDownloaded = [x.name for x in p if x.name in supported_models]
selected_model="glove.6B.50d.w2vformat.txt"
if selected_model not in modelsDownloaded:
print("Model not in folder, downloading")
import urllib.request
location = Path(modelsPath)
local_file_path = location/f"glove.6B.50d.w2vformat.txt"
urllib.request.urlretrieve(f'https://aion-pretrained-models.s3.ap-south-1.amazonaws.com/text/glove.6B.50d.w2vformat.txt', local_file_path)
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
tokenizer = AutoTokenizer.from_pretrained("sshleifer/distilbart-cnn-12-6")
model = AutoModelForSeq2SeqLM.from_pretrained("sshleifer/distilbart-cnn-12-6")
tokenizer.save_pretrained(preTrainedModellocation)
model.save_pretrained(preTrainedModellocation)
#PreTrained Model Download ends-----------------------
deployLocationData=deployLocation+"\\\\data\\\\"
modelLocation=Path(DATA_DIR)/'PreTrainedModels'/'TextSummarization'/'glove.6B.50d.w2vformat.txt'
KeyWordsTS=KeyWordsTS.replace(",", " ")
noOfKeyword = len(KeyWordsTS.split())
keywords = KeyWordsTS.split()
embeddings = {}
word = ''
with open(modelLocation, 'r', encoding="utf8") as f:
header = f.readline()
header = header.split(' ')
vocab_size = int(header[0])
embed_size = int(header[1])
for i in range(vocab_size):
data = f.readline().strip().split(' ')
word = data[0]
embeddings[word] = [float(x) for x in data[1:]]
readData=pd.read_csv(pathForKeywordFileTS,encoding='utf-8',encoding_errors= 'replace')
for i in range(noOfKeyword):
terms=(sorted(embeddings.keys(), key=lambda word: spatial.distance.euclidean(embeddings[word], embeddings[keywords[i]])) )[1:6]
readData = readData.append({'Keyword': keywords[i]}, ignore_index=True)
for j in range(len(terms)):
readData = readData.append({'Keyword': terms[j]}, ignore_index=True)
deployLocationDataKwDbFile=deployLocationData+"keywordDataBase.csv"
readData.to_csv(deployLocationDataKwDbFile,encoding='utf-8',index=False)
datalocation_path=dataLocationTS
path=Path(datalocation_path)
fileList=os.listdir(path)
textExtraction = pd.DataFrame()
textExtraction['Sentences']=""
rowIndex=0
for i in range(len(fileList)):
fileName=str(datalocation_path)+"\\\\"+str(fileList[i])
if fileName.endswith(".pdf"):
print("\\n files ",fileList[i])
from pypdf import PdfReader
reader = PdfReader(fileName)
number_of_pages = len(reader.pages)
text=""
textOutputForFile=""
OrgTextOutputForFile=""
for i in range(number_of_pages) :
page = reader.pages[i]
text1 = page.extract_text()
text=text+text1
import nltk
tokens = nltk.sent_tokenize(text)
for sentence in tokens:
sentence=sentence.replace("\\n", " ")
if (len(sentence.split()) < 4 ) or (len(str(sentence.split(',')).split()) < 8)or (any(chr.isdigit() for chr in sentence)) :
continue
textExtraction.at[rowIndex,'Sentences']=str(sentence.strip())
rowIndex=rowIndex+1
if fileName.endswith(".txt"):
print("\\n txt files",fileList[i])
data=[]
with open(fileName, "r",encoding="utf-8") as f:
data.append(f.read())
str1 = ""
for ele in data:
str1 += ele
sentences=str1.split(".")
count=0
for sentence in sentences:
count += 1
textExtraction.at[rowIndex+i,'Sentences']=str(sentence.strip())
rowIndex=rowIndex+1
df=textExtraction
#print("textExtraction",textExtraction)
deployLocationDataPreProcessData=deployLocationData+"preprocesseddata.csv"
save_csv_compressed(df, deployLocationDataPreProcessData, encoding='utf-8')
df['Label']=0
kw=pd.read_csv(deployLocationDataKwDbFile,encoding='utf-8',encoding_errors= 'replace')
Keyword_list = kw['Keyword'].tolist()
for i in df.index:
for x in Keyword_list:
if (str(df["Sentences"][i])).find(x) != -1:
df['Label'][i]=1
break
deployLocationDataPostProcessData=deployLocationData+"postprocesseddata.csv"
#df.to_csv(deployLocationDataPostProcessData,encoding='utf-8')
save_csv_compressed(df, deployLocationDataPostProcessData, encoding='utf-8')
labelledData=df
train_df=labelledData
labelencoder = LabelEncoder()
train_df['Sentences'] = labelencoder.fit_transform(train_df['Sentences'])
X = train_df.drop('Label',axis=1)
y = train_df['Label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
Classifier = RandomForestClassifier(n_estimators = 10, random_state = 42)
modelTs=Classifier.fit(X, y)
import pickle
deployLocationTS=deployLocation+"\\\\model\\\\"+iterName+'_'+iterVersion+'.sav'
deployLocationTS2=deployLocation+"\\\\model\\\\"+"classificationModel.sav"
pickle.dump(modelTs, open(deployLocationTS, 'wb'))
pickle.dump(modelTs, open(deployLocationTS2, 'wb'))
print("\\n trainModel Ends")
saved_model = 'textsummarization_'+iterName+'_'+iterVersion
log.info('Status:-|... AION text summarization completed')
model = learner_type
log.info('================ text summarization Completed ================\\n')
if survival_analysis_status:
sa_method = config_obj.getEionanomalyModels()
labeldict = {}
log.info('\\n================ SurvivalAnalysis Started ================ ')
log.info('Status:-|... AION SurvivalAnalysis started')
log.info('\\n================ SurvivalAnalysis DataFrame ================ ')
log.info(dataFrame)
from survival import survival_analysis
from learner.machinelearning import machinelearning
sa_obj = survival_analysis.SurvivalAnalysis(dataFrame, preprocess_pipe, sa_method, targetFeature, datetimeFeature, filter_expression, profilerObj.train_features_type)
if sa_obj != None:
predict_json = sa_obj.learn()
if sa_method.lower() in ['kaplanmeierfitter','kaplanmeier','kaplan-meier','kaplan meier','kaplan','km','kmf']:
predicted = sa_obj.models[0].predict(dataFrame[datetimeFeature])
status, msg = save_csv(predicted,predicted_data_file)
if not status:
log.info('CSV File Error: ' + str(msg))
self.features = [datetimeFeature]
elif sa_method.lower() in ['coxphfitter','coxregression','cox-regression','cox regression','coxproportionalhazard','coxph','cox','cph']:
predicted = sa_obj.models[0].predict_cumulative_hazard(dataFrame)
datacolumns = list(dataFrame.columns)
targetColumn = targetFeature
if targetColumn in datacolumns:
datacolumns.remove(targetColumn)
self.features = datacolumns
score = sa_obj.score
scoreParam = 'Concordance_Index'
status,msg = save_csv(predicted,predicted_data_file)
if not status:
log.info('CSV File Error: ' + str(msg))
model = sa_method
modelType = "SurvivalAnalysis"
model_type = "SurvivalAnalysis"
modelName = sa_method
i = 1
for mdl in sa_obj.models:
saved_model = "%s_%s_%s_%d.sav"%(model_type,sa_method,iterVersion,i)
pickle.dump(mdl, open(os.path.join(deployLocation,'model',saved_model), 'wb'))
i+=1
p = 1
for plot in sa_obj.plots:
img_name = "%s_%d.png"%(sa_method,p)
img_location = os.path.join(imageFolderLocation,img_name)
plot.savefig(img_location,bbox_inches='tight')
sa_images.append(img_location)
p+=1
log.info('Status:-|... AION SurvivalAnalysis completed')
log.info('\\n================ SurvivalAnalysis Completed ================ ')
if visualizationstatus:
visualizationJson = config_obj.getEionVisualizationConfiguration()
log.info('\\n================== Visualization Recommendation Started ==================')
visualizer_mlstart = time.time()
from visualization.visualization import Visualization
visualizationObj = Visualization(iterName,iterVersion,dataFrame,visualizationJson,datetimeFeature,deployLocation,dataFolderLocation,numericContinuousFeatures,discreteFeatures,categoricalFeatures,self.features,targetFeature,model_type,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,labelMaps,self.vectorizerFeatures,self.textFeatures,self.numericalFeatures,self.nonNumericFeatures,self.emptyFeatures,self.dfrows,self.dfcols,saved_model,scoreParam,learner_type,model,featureReduction,reduction_data_file)
visualizationObj.visualizationrecommandsystem()
visualizer_mlexecutionTime=time.time() - visualizer_mlstart
log.info('-------> COMPUTING: Total Visualizer Execution Time '+str(visualizer_mlexecutionTime))
log.info('================== Visualization Recommendation Completed ==================\\n')
if similarityIdentificationStatus or contextualSearchStatus:
datacolumns=list(dataFrame.columns)
features = modelFeatures.split(",")
if indexFeature != '' and indexFeature != 'NA':
iFeature = indexFeature.split(",")
for ifea in iFeature:
if ifea not in features:
features.append(ifea)
for x in features:
dataFrame[x] = similaritydf[x]
#get vectordb(chromadb) status selected
if similarityIdentificationStatus:
learner_type = 'similarityIdentification'
else:
learner_type = 'contextualSearch'
vecDBCosSearchStatus = config_obj.getVectorDBCosSearchStatus(learner_type)
if vecDBCosSearchStatus:
status, msg = save_chromadb(dataFrame, config_obj, trained_data_file, modelFeatures)
if not status:
log.info('Vector DB File Error: '+str(msg))
else:
status, msg = save_csv(dataFrame,trained_data_file)
if not status:
log.info('CSV File Error: '+str(msg))
self.features = datacolumns
model_type = config_obj.getAlgoName(problem_type)
model = model_type #bug 12833
model_tried = '{"Model":"'+model_type+'","FeatureEngineering":"NA","Score":"NA","ModelUncertainty":"NA"}'
modelType = learner_type
saved_model = learner_type
score = 'NA'
if deploy_status:
if str(model) != 'None':
log.info('\\n================== Deployment Started ==================')
log.info('Status:-|... AION Creating Prediction Service Start')
deployer_mlstart = time.time()
deployJson = config_obj.getEionDeployerConfiguration()
deploy_name = iterName+'_'+iterVersion
from prediction_package.model_deploy import DeploymentManager
if textsummarizationStatus :
deploy = DeploymentManager()
deploy.deployTSum(deployLocation,preTrainedModellocation)
codeConfigure.save_config(deployLocation)
deployer_mlexecutionTime=time.time() - deployer_mlstart
log.info('-------> COMPUTING: Total Deployer Execution Time '+str(deployer_mlexecutionTime))
log.info('Status:-|... AION Deployer completed')
log.info('================== Deployment Completed ==================')
else:
deploy = DeploymentManager()
deploy.deploy_model(deploy_name,deployJson,learner_type,model_type,model,scoreParam,saved_model,deployLocation,self.features,self.profilerAction,dataLocation,labelMaps,column_merge_flag,self.textFeatures,self.numericalFeatures,self.nonNumericFeatures,preprocessing_pipe,numericToLabel_json,threshold,loss_matrix,optimizer,firstDocFeature,secondDocFeature,padding_length,trained_data_file,dictDiffCount,targetFeature,normalizer_pickle_file,normFeatures,pcaModel_pickle_file,bpca_features,apca_features,self.method,deployFolder,iterName,iterVersion,self.wordToNumericFeatures,imageconfig,sessonal_freq,additional_regressors,grouperbyjson,rowfilterexpression,xtrain,profiled_data_file,conversion_method,modelFeatures,indexFeature,lag_order,scalertransformationFile,noofforecasts,preprocess_pipe,preprocess_out_columns, label_encoder,datetimeFeature,usecaseLocation,deploy_config)
codeConfigure.update_config('deploy_path',os.path.join(deployLocation,'publish'))
codeConfigure.save_config(deployLocation)
deployer_mlexecutionTime=time.time() - deployer_mlstart
log.info('-------> COMPUTING: Total Deployer Execution Time '+str(deployer_mlexecutionTime))
log.info('Status:-|... AION Creating Prediction Service completed')
log.info('================== Deployment Completed ==================')
if not outputDriftStatus and not inputDriftStatus:
from transformations.dataProfiler import set_features
self.features = set_features(self.features,profilerObj)
self.matrix += '}'
self.trainmatrix += '}'
print(model_tried)
model_tried = eval('['+model_tried+']')
matrix = eval(self.matrix)
trainmatrix = eval(self.trainmatrix)
deployPath = deployLocation.replace(os.sep, '/')
if survival_analysis_status:
output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"survivalProbability":json.loads(predict_json),"featuresused":str(self.features),"targetFeature":str(targetColumn),"EvaluatedModels":model_tried,"imageLocation":str(sa_images),"LogFile":logFileName}}
elif not timeseriesStatus:
try:
json.dumps(params)
output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"trainmatrix":trainmatrix,"featuresused":str(self.features),"targetFeature":str(targetColumn),"params":params,"EvaluatedModels":model_tried,"LogFile":logFileName}}
except:
output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"trainmatrix":trainmatrix,"featuresused":str(self.features),"targetFeature":str(targetColumn),"params":"","EvaluatedModels":model_tried,"LogFile":logFileName}}
else:
if config_obj.summarize:
modelType = 'Summarization'
output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"featuresused":str(self.features),"targetFeature":str(targetColumn),"EvaluatedModels":model_tried,'forecasts':json.loads(forecast_output),"LogFile":logFileName}}
if bool(topics) == True:
output_json['topics'] = topics
with open(outputjsonFile, 'w',encoding='utf-8') as f:
json.dump(output_json, f)
f.close()
output_json = json.dumps(output_json)
log.info('\\n------------- Summary ------------')
log.info('------->No of rows & columns in data:('+str(self.dfrows)+','+str(self.dfcols)+')')
log.info('------->No of missing Features :'+str(len(self.mFeatures)))
log.info('------->Missing Features:'+str(self.mFeatures))
log.info('------->Text Features:'+str(self.textFeatures))
log.info('------->No of Nonnumeric Features :'+str(len(self.nonNumericFeatures)))
log.info('------->Non-Numeric Features :' +str(self.nonNumericFeatures))
if threshold == -1:
log.info('------->Threshold: NA')
else:
log.info('------->Threshold: '+str(threshold))
log.info('------->Label Maps of Target Feature for classification :'+str(labelMaps))
for i in range(0,len(self.similarGroups)):
log.info('------->Similar Groups '+str(i+1)+' '+str(self.similarGroups[i]))
if((learner_type != 'TS') & (learner_type != 'AR')):
log.info('------->No of columns and rows used for Modeling :'+str(featureDataShape))
log.info('------->Features Used for Modeling:'+str(self.features))
log.info('------->Target Feature: '+str(targetColumn))
log.info('------->Best Model Score :'+str(score))
log.info('------->Best Parameters:'+str(params))
log.info('------->Type of Model :'+str(modelType))
log.info('------->Best Model :'+str(model))
log.info('------------- Summary ------------\\n')
log.info('Status:-|... AION Model Training Successfully Done')
except Exception as inst:
log.info('server code execution failed !....'+str(inst))
log.error(inst, exc_info = True)
output_json = {"status":"FAIL","message":str(inst).strip('"'),"LogFile":logFileName}
output_json = json.dumps(output_json)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
executionTime = timeit.default_timer() - startTime
log.info('\\nTotal execution time(sec) :'+str(executionTime))
log.info('\\n------------- Output JSON ------------')
log.info('aion_learner_status:'+str(output_json))
log.info('------------- Output JSON ------------\\n')
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
hdlr.close()
log.removeHandler(hdlr)
return output_json
def split_into_train_test_data(self,featureData,targetData,testPercentage,log,modelType='classification'): #Unnati
log.info('\\n-------------- Test Train Split ----------------')
if testPercentage == 0 or testPercentage == 100: #Unnati
xtrain=featureData
ytrain=targetData
xtest=pd.DataFrame()
ytest=pd.DataFrame()
else:
testSize= testPercentage/100 #Unnati
if modelType == 'regression':
log.info('-------> Split Type: Random Split')
xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True,random_state=42)
else:
try:
log.info('-------> Split Type: Stratify Split')
xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,stratify=targetData,test_size=testSize,random_state=42)
except Exception as ValueError:
count_unique = targetData.value_counts()
feature_with_single_count = count_unique[ count_unique == 1].index.tolist()
error = f"The least populated class in {feature_with_single_count} has only 1 member, which is too few. The minimum number of groups for any class cannot be less than 2"
raise Exception(error) from ValueError
except:
log.info('-------> Split Type: Random Split')
xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True,random_state=42)
log.info('Status:- !... Train / test split done: '+str(100-testPercentage)+'% train,'+str(testPercentage)+'% test') #Unnati
log.info('-------> Train Data Shape: '+str(xtrain.shape)+' ---------->')
log.info('-------> Test Data Shape: '+str(xtest.shape)+' ---------->')
log.info('-------------- Test Train Split End ----------------\\n')
return(xtrain,ytrain,xtest,ytest)
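# Hedged usage sketch (illustrative only, placeholder names): a testPercentage of 0 or 100 skips
# the split and returns empty test frames; classification targets are stratified when possible,
# with a fallback to a plain random split.
#   xtrain, ytrain, xtest, ytest = server().split_into_train_test_data(featureData, targetData, 20, log, 'classification')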
def aion_train_model(arg):
warnings.filterwarnings('ignore')
config_path = Path( arg)
with open( config_path, 'r') as f:
config = json.load( f)
log = set_log_handler(config['basic'])
log.info('************* Version - v'+AION_VERSION+' *************** \\n')
msg = '-------> Execution Start Time: '+ datetime.datetime.now(timezone("Asia/Kolkata")).strftime('%Y-%m-%d %H:%M:%S' + ' IST')
log.info(msg)
try:
config_validate(arg)
valid, msg = pushRecordForTraining()
if valid:
serverObj = server()
configObj = AionConfigManager()
codeConfigure = code_configure()
codeConfigure.create_config(config)
readConfistatus,msg = configObj.readConfigurationFile(config)
if(readConfistatus == False):
raise ValueError( msg)
output = serverObj.startScriptExecution(configObj, codeConfigure, log)
else:
output = {"status":"LicenseVerificationFailed","message":str(msg).strip('"')}
output = json.dumps(output)
print( f"\\naion_learner_status:{output}\\n")
log.info( f"\\naion_learner_status:{output}\\n")
except Exception as inst:
output = {"status":"FAIL","message":str(inst).strip('"')}
output = json.dumps(output)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
print(f"\\naion_learner_status:{output}\\n")
log.info( f"\\naion_learner_status:{output}\\n")
return output
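# --- Illustrative usage sketch (the config path below is a placeholder) ---
# aion_train_model() takes the path of an AION training config JSON and returns
# the learner status as a JSON string.
def _example_train_call(config_path="C:/aion/config/usecase_1.json"):
    """Illustrative only: the config path is a placeholder for a real AION config file."""
    out = aion_train_model(config_path)
    return json.loads(out).get('status')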
if __name__ == "__main__":
aion_train_model( sys.argv[1])
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import platform
import shutil
import subprocess
import sys
import glob
import json
import time
def publish(data):
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
model = jsonData['modelName']
version = jsonData['modelVersion']
deployFolder = jsonData['deployLocation']
model = model.replace(" ", "_")
deployedPath = os.path.join(deployFolder,model+'_'+version)
deployedPath = os.path.join(deployedPath,'WHEELfile')
whlfilename='na'
if os.path.isdir(deployedPath):
for file in os.listdir(deployedPath):
if file.endswith(".whl"):
whlfilename = os.path.join(deployedPath,file)
if whlfilename != 'na':
subprocess.check_call([sys.executable, "-m", "pip", "uninstall","-y",model])
subprocess.check_call([sys.executable, "-m", "pip", "install", whlfilename])
status,pid,ip,port = check_service_running(jsonData['modelName'],jsonData['serviceFolder'])
if status == 'Running':
service_stop(json.dumps(jsonData))
service_start(json.dumps(jsonData))
output_json = {'status':"SUCCESS"}
output_json = json.dumps(output_json)
else:
output_json = {'status':'Error','Msg':'Installation Package not Found'}
output_json = json.dumps(output_json)
return(output_json)
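# --- Illustrative sketch: the payload shape accepted by publish() ---
# publish() takes either a path to a JSON file or a JSON string. The keys below
# mirror what publish(), service_start() and check_service_running() read; every
# value is a placeholder, not a real deployment path.
def _example_publish_payload():
    return json.dumps({
        "modelName": "usecase_1",
        "modelVersion": "1",
        "deployLocation": "C:/aion/target",
        "serviceFolder": "C:/aion/services",
        "ip": "127.0.0.1",
        "port": "8094",
    })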
def check_service_running(model,serviceFolder):
model = model.replace(" ", "_")
filename = model+'_service.py'
modelservicefile = os.path.join(serviceFolder,filename)
status = 'File Not Exist'
ip = ''
port = ''
pid = ''
if os.path.exists(modelservicefile):
status = 'File Exist'
import psutil
for proc in psutil.process_iter():
pinfo = proc.as_dict(attrs=['pid', 'name', 'cmdline','connections'])
if 'python' in pinfo['name']:
if pinfo['cmdline'] and len(pinfo['cmdline']) > 1 and filename in pinfo['cmdline'][1]:
status = 'Running'
pid = pinfo['pid']
for x in pinfo['connections']:
ip = x.laddr.ip
port = x.laddr.port
return(status,pid,ip,port)
def service_stop(data):
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
status,pid,ip,port = check_service_running(jsonData['modelName'],jsonData['serviceFolder'])
if status == 'Running':
import psutil
p = psutil.Process(int(pid))
p.terminate()
time.sleep(2)
output_json = {'status':'SUCCESS'}
output_json = json.dumps(output_json)
return(output_json)
def service_start(data):
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
model = jsonData['modelName']
version = jsonData['modelVersion']
ip = jsonData['ip']
port = jsonData['port']
deployFolder = jsonData['deployLocation']
serviceFolder = jsonData['serviceFolder']
model = model.replace(" ", "_")
deployLocation = os.path.join(deployFolder,model+'_'+version)
org_service_file = os.path.abspath(os.path.join(os.path.dirname(__file__),'model_service.py'))
filename = model+'_service.py'
modelservicefile = os.path.join(serviceFolder,filename)
status = 'File Not Exist'
if os.path.exists(modelservicefile):
status = 'File Exist'
r = ([line.decode(errors='ignore').split() for line in subprocess.check_output("tasklist").splitlines()])
for i in range(len(r)):
if filename in r[i]:
status = 'Running'
if status == 'File Not Exist':
shutil.copy(org_service_file,modelservicefile)
with open(modelservicefile, 'r+') as file:
content = file.read()
file.seek(0, 0)
line = 'from '+model+' import aion_performance'
file.write(line+"\\n")
line = 'from '+model+' import aion_drift'
file.write(line+ "\\n")
line = 'from '+model+' import featureslist'
file.write(line+ "\\n")
line = 'from '+model+' import aion_prediction'
file.write(line+ "\\n")
file.write(content)
file.close()
status = 'File Exist'
if status == 'File Exist':
status,pid,ipold,portold = check_service_running(jsonData['modelName'],jsonData['serviceFolder'])
if status != 'Running':
command = "python "+modelservicefile+' '+str(port)+' '+str(ip)
os.system('start cmd /c "'+command+'"')
time.sleep(2)
status = 'Running'
output_json = {'status':'SUCCESS','Msg':status}
output_json = json.dumps(output_json)
return(output_json)
if __name__ == "__main__":
publish(sys.argv[1])
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import logging
logging.getLogger('tensorflow').disabled = True
from autogluon.tabular import TabularDataset, TabularPredictor
from autogluon.core.utils.utils import setup_outputdir
from autogluon.core.utils.loaders import load_pkl
from autogluon.core.utils.savers import save_pkl
import datetime, time, timeit
from datetime import datetime as dt
import os.path
import json
import io
import shutil
import sys
#from Gluon_MultilabelPredictor import MultilabelPredictor
class MultilabelPredictor():
""" Tabular Predictor for predicting multiple columns in table.
Creates multiple TabularPredictor objects which you can also use individually.
You can access the TabularPredictor for a particular label via: `multilabel_predictor.get_predictor(label_i)`
Parameters
----------
labels : List[str]
The ith element of this list is the column (i.e. `label`) predicted by the ith TabularPredictor stored in this object.
path : str
Path to directory where models and intermediate outputs should be saved.
If unspecified, a time-stamped folder called "AutogluonModels/ag-[TIMESTAMP]" will be created in the working directory to store all models.
Note: To call `fit()` twice and save all results of each fit, you must specify different `path` locations or don't specify `path` at all.
Otherwise files from first `fit()` will be overwritten by second `fit()`.
Caution: when predicting many labels, this directory may grow large as it needs to store many TabularPredictors.
problem_types : List[str]
The ith element is the `problem_type` for the ith TabularPredictor stored in this object.
eval_metrics : List[str]
The ith element is the `eval_metric` for the ith TabularPredictor stored in this object.
consider_labels_correlation : bool
Whether the predictions of multiple labels should account for label correlations or predict each label independently of the others.
If True, the ordering of `labels` may affect resulting accuracy as each label is predicted conditional on the previous labels appearing earlier in this list (i.e. in an auto-regressive fashion).
Set to False if during inference you may want to individually use just the ith TabularPredictor without predicting all the other labels.
kwargs :
Arguments passed into the initialization of each TabularPredictor.
"""
multi_predictor_file = 'multilabel_predictor.pkl'
def __init__(self, labels, path, problem_types=None, eval_metrics=None, consider_labels_correlation=True, **kwargs):
if len(labels) < 2:
raise ValueError("MultilabelPredictor is only intended for predicting MULTIPLE labels (columns), use TabularPredictor for predicting one label (column).")
self.path = setup_outputdir(path, warn_if_exist=False)
self.labels = labels
#print(self.labels)
self.consider_labels_correlation = consider_labels_correlation
self.predictors = {} # key = label, value = TabularPredictor or str path to the TabularPredictor for this label
if eval_metrics is None:
self.eval_metrics = {}
else:
self.eval_metrics = {labels[i] : eval_metrics[i] for i in range(len(labels))}
problem_type = None
eval_metric = None
for i in range(len(labels)):
label = labels[i]
path_i = self.path + "Predictor_" + label
if problem_types is not None:
problem_type = problem_types[i]
if eval_metrics is not None:
eval_metric = self.eval_metrics[label]
self.predictors[label] = TabularPredictor(label=label, problem_type=problem_type, eval_metric=eval_metric, path=path_i, **kwargs)
def fit(self, train_data, tuning_data=None, **kwargs):
""" Fits a separate TabularPredictor to predict each of the labels.
Parameters
----------
train_data, tuning_data : str or autogluon.tabular.TabularDataset or pd.DataFrame
See documentation for `TabularPredictor.fit()`.
kwargs :
Arguments passed into the `fit()` call for each TabularPredictor.
"""
if isinstance(train_data, str):
train_data = TabularDataset(train_data)
if tuning_data is not None and isinstance(tuning_data, str):
tuning_data = TabularDataset(tuning_data)
train_data_og = train_data.copy()
if tuning_data is not None:
tuning_data_og = tuning_data.copy()
save_metrics = len(self.eval_metrics) == 0
for i in range(len(self.labels)):
label = self.labels[i]
predictor = self.get_predictor(label)
if not self.consider_labels_correlation:
labels_to_drop = [l for l in self.labels if l!=label]
else:
labels_to_drop = [self.labels[j] for j in range(i+1,len(self.labels))]
train_data = train_data_og.drop(labels_to_drop, axis=1)
if tuning_data is not None:
tuning_data = tuning_data_og.drop(labels_to_drop, axis=1)
print(f"Fitting TabularPredictor for label: {label} ...")
predictor.fit(train_data=train_data, tuning_data=tuning_data, **kwargs)
self.predictors[label] = predictor.path
if save_metrics:
self.eval_metrics[label] = predictor.eval_metric
self.save()
def eval_metrics(self):
return(self.eval_metrics)
def predict(self, data, **kwargs):
""" Returns DataFrame with label columns containing predictions for each label.
Parameters
----------
data : str or autogluon.tabular.TabularDataset or pd.DataFrame
Data to make predictions for. If label columns are present in this data, they will be ignored. See documentation for `TabularPredictor.predict()`.
kwargs :
Arguments passed into the predict() call for each TabularPredictor.
"""
return self._predict(data, as_proba=False, **kwargs)
def predict_proba(self, data, **kwargs):
""" Returns dict where each key is a label and the corresponding value is the `predict_proba()` output for just that label.
Parameters
----------
data : str or autogluon.tabular.TabularDataset or pd.DataFrame
Data to make predictions for. See documentation for `TabularPredictor.predict()` and `TabularPredictor.predict_proba()`.
kwargs :
Arguments passed into the `predict_proba()` call for each TabularPredictor (also passed into a `predict()` call).
"""
return self._predict(data, as_proba=True, **kwargs)
def evaluate(self, data, **kwargs):
""" Returns dict where each key is a label and the corresponding value is the `evaluate()` output for just that label.
Parameters
----------
data : str or autogluon.tabular.TabularDataset or pd.DataFrame
Data to evaluate predictions of all labels for; must contain all labels as columns. See documentation for `TabularPredictor.evaluate()`.
kwargs :
Arguments passed into the `evaluate()` call for each TabularPredictor (also passed into the `predict()` call).
"""
data = self._get_data(data)
eval_dict = {}
for label in self.labels:
print(f"Evaluating TabularPredictor for label: {label} ...")
predictor = self.get_predictor(label)
eval_dict[label] = predictor.evaluate(data, **kwargs)
if self.consider_labels_correlation:
data[label] = predictor.predict(data, **kwargs)
return eval_dict
def save(self):
""" Save MultilabelPredictor to disk. """
for label in self.labels:
if not isinstance(self.predictors[label], str):
self.predictors[label] = self.predictors[label].path
save_pkl.save(path=self.path+self.multi_predictor_file, object=self)
print(f"MultilabelPredictor saved to disk. Load with: MultilabelPredictor.load('{self.path}')")
@classmethod
def load(cls, path):
""" Load MultilabelPredictor from disk `path` previously specified when creating this MultilabelPredictor. """
path = os.path.expanduser(path)
if path[-1] != os.path.sep:
path = path + os.path.sep
return load_pkl.load(path=path+cls.multi_predictor_file)
def get_predictor(self, label):
""" Returns TabularPredictor which is used to predict this label. """
predictor = self.predictors[label]
if isinstance(predictor, str):
return TabularPredictor.load(path=predictor)
return predictor
def _get_data(self, data):
if isinstance(data, str):
return TabularDataset(data)
return data.copy()
def _predict(self, data, as_proba=False, **kwargs):
data = self._get_data(data)
if as_proba:
predproba_dict = {}
for label in self.labels:
print(f"Predicting with TabularPredictor for label: {label} ...")
predictor = self.get_predictor(label)
if as_proba:
predproba_dict[label] = predictor.predict_proba(data, as_multiclass=True, **kwargs)
data[label] = predictor.predict(data, **kwargs)
if not as_proba:
return data[self.labels]
else:
return predproba_dict
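# --- Illustrative usage sketch (assumes AutoGluon is installed; labels and file paths are placeholders) ---
def _example_multilabel_usage(train_csv='train.csv', test_csv='test.csv'):
    """Minimal end-to-end use of the MultilabelPredictor defined above."""
    labels = ['label_a', 'label_b']                       # columns to predict (placeholders)
    predictor = MultilabelPredictor(labels=labels, path='AutogluonModels/example/')
    predictor.fit(TabularDataset(train_csv), time_limit=60)
    test_data = TabularDataset(test_csv)
    predictions = predictor.predict(test_data)            # DataFrame: one column per label
    scores = predictor.evaluate(test_data)                # dict: label -> metric dict
    return predictions, scores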
def aion_train_gluon(arg):
configFile = arg
with open(configFile, 'rb') as cfile:
data = json.load(cfile)
cfile.close()
rootElement = data['basic']
modelname = rootElement['modelName']
version = rootElement['modelVersion']
dataLocation = rootElement['dataLocation']
deployFolder = rootElement['deployLocation']
analysisType = rootElement['analysisType']
testPercentage = data['advance']['testPercentage']
deployLocation = os.path.join(deployFolder,modelname+'_'+version)
try:
os.makedirs(deployLocation)
except OSError as e:
shutil.rmtree(deployLocation)
os.makedirs(deployLocation)
logLocation = os.path.join(deployLocation,'log')
try:
os.makedirs(logLocation)
except OSError as e:
pass
etcLocation = os.path.join(deployLocation,'etc')
try:
os.makedirs(etcLocation)
except OSError as e:
pass
logFileName=os.path.join(deployLocation,'log','model_training_logs.log')
filehandler = logging.FileHandler(logFileName, 'w','utf-8')
formatter = logging.Formatter('%(message)s')
filehandler.setFormatter(formatter)
log = logging.getLogger('eion')
log.propagate = False
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
log.removeHandler(hdlr)
log.addHandler(filehandler)
log.setLevel(logging.INFO)
log.info('************* Version - v1.2.0 *************** \\n')
msg = '-------> Execution Start Time: '+ dt.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
log.info(msg)
dataLabels = rootElement['targetFeature'].split(',')
# Create and Write the config file used in Prediction
# ----------------------------------------------------------------------------#
tdata = TabularDataset(dataLocation)
#train_data = tdata
train_data = tdata.sample(frac = 0.8)
test_data = tdata.drop(train_data.index)
if rootElement['trainingFeatures'] != '':
trainingFeatures = rootElement['trainingFeatures'].split(',')
else:
trainingFeatures = list(train_data.columns)
features = trainingFeatures
for x in dataLabels:
if x not in features:
features.append(x)
indexFeature = rootElement['indexFeature']
if indexFeature != '':
indexFeature = indexFeature.split(',')
for x in indexFeature:
if x in features:
features.remove(x)
dateTimeFeature = rootElement['dateTimeFeature']
if dateTimeFeature != '':
dateTimeFeature = dateTimeFeature.split(',')
for x in dateTimeFeature:
if x in features:
features.remove(x)
train_data = train_data[features]
test_data = test_data[features]
configJsonFile = {"targetFeature":dataLabels,"features":",".join([feature for feature in features])}
configJsonFilePath = os.path.join(deployLocation,'etc','predictionConfig.json')
if len(dataLabels) == 1 and analysisType['multiLabelPrediction'] == "False":
dataLabels = rootElement['targetFeature']
with io.open(configJsonFilePath, 'w', encoding='utf8') as outfile:
str_ = json.dumps(configJsonFile, ensure_ascii=False)
outfile.write(str_)
# ----------------------------------------------------------------------------#
if analysisType['multiLabelPrediction'] == "True":
# Copy and Write the Prediction script file into deployment location
# ----------------------------------------------------------------------------#
srcFile = os.path.join(os.path.dirname(__file__),'gluon','AION_Gluon_MultiLabelPrediction.py')
dstFile = os.path.join(deployLocation,'aion_predict.py')
shutil.copy(srcFile,dstFile)
# ----------------------------------------------------------------------------#
labels = dataLabels # which columns to predict based on the others
#problem_types = dataProblem_types # type of each prediction problem
save_path = os.path.join(deployLocation,'ModelPath') # specifies folder to store trained models
time_limit = 5 # how many seconds to train the TabularPredictor for each label
log.info('Status:-|... AION Gluon Start')
try:
if len(labels) < 2:
log.info('Status:-|... AION Evaluation Error: Target should contain multiple columns')
# ----------------------------------------------------------------------------#
output = {'status':'FAIL','message':'Number of target variables should be 2 or more'}
else:
multi_predictor = MultilabelPredictor(labels=labels, path=save_path)
multi_predictor.fit(train_data, time_limit=time_limit)
log.info('Status:-|... AION Gluon Stop')
log.info('Status:-|... AION Evaluation Start')
trainevaluations = multi_predictor.evaluate(train_data)
testevaluations = multi_predictor.evaluate(test_data)
best_model = {}
for label in labels:
predictor_class = multi_predictor.get_predictor(label)
predictor_class.get_model_best()
best_model[label] = predictor_class.get_model_best()
log.info('Status:-|... AION Evaluation Stop')
# ----------------------------------------------------------------------------#
output = {'status':'SUCCESS','data':{'ModelType':'MultiLabelPrediction','EvaluatedModels':'','featuresused':'','BestScore': '0', 'ScoreType': 'ACCURACY','deployLocation':deployLocation,'matrix':trainevaluations,'testmatrix':testevaluations,'BestModel':best_model, 'LogFile':logFileName}}
except Exception as inst:
log.info('Status:-|... AION Gluon Error')
output = {"status":"FAIL","message":str(inst).strip('"')}
if analysisType['multiModalLearning'] == "True":
from autogluon.core.utils.utils import get_cpu_count, get_gpu_count
from autogluon.text import TextPredictor
# check the system and then set the equivalent flag
# ----------------------------------------------------------------------------#
os.environ["AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU"] = "0"
if get_gpu_count() == 0:
os.environ["AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU"] = "1"
# ----------------------------------------------------------------------------#
# Copy and Write the Prediction script file into deployment location
# ----------------------------------------------------------------------------#
srcFile = os.path.join(os.path.dirname(__file__),'gluon','AION_Gluon_MultiModalPrediction.py')
dstFile = os.path.join(deployLocation,'aion_predict.py')
shutil.copy(srcFile,dstFile)
time_limit = None # set to larger value in your applications
save_path = os.path.join(deployLocation,'text_prediction')
predictor = TextPredictor(label=dataLabels, path=save_path)
predictor.fit(train_data, time_limit=time_limit)
log.info('Status:-|... AION Gluon Stop')
log.info('Status:-|... AION Evaluation Start')
trainevaluations = predictor.evaluate(train_data)
log.info('Status:-|... AION Evaluation Stop')
# ----------------------------------------------------------------------------#
output = {'status':'SUCCESS','data':{'ModelType':'MultiModelLearning','EvaluatedModels':'','featuresused':'','BestModel':'AutoGluon','BestScore': '0', 'ScoreType': 'SCORE','deployLocation':deployLocation,'matrix':trainevaluations,'LogFile':logFileName}}
output = json.dumps(output)
print("\\n")
print("aion_learner_status:",output)
print("\\n")
log.info('\\n------------- Output JSON ------------')
log.info('-------> Output :'+str(output))
log.info('------------- Output JSON ------------\\n')
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
hdlr.close()
log.removeHandler(hdlr)
return(output)
if __name__ == "__main__":
aion_train_gluon(sys.argv[1])
<s> import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))
<s> #from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from http.server import BaseHTTPRequestHandler,HTTPServer
#from SocketServer import ThreadingMixIn
from socketserver import ThreadingMixIn
from functools import partial
from http.server import SimpleHTTPRequestHandler, test
import base64
from appbe.dataPath import DEPLOY_LOCATION
'''
from augustus.core.ModelLoader import ModelLoader
from augustus.strict import modelLoader
'''
import pandas as pd
import os,sys
from os.path import expanduser
import platform
import numpy as np
import configparser
import threading
import subprocess
import argparse
import re
import cgi
from datetime import datetime
import json
user_records = {}
class LocalModelData(object):
models = {}
class HTTPRequestHandler(BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
username = kwargs.pop("username")
password = kwargs.pop("password")
self._auth = base64.b64encode(f"{username}:{password}".encode()).decode()
super().__init__(*args)
def do_HEAD(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
def do_AUTHHEAD(self):
self.send_response(401)
self.send_header("WWW-Authenticate", 'Basic realm="Test"')
self.send_header("Content-type", "text/html")
self.end_headers()
def do_POST(self):
print("PYTHON ######## REQUEST ####### STARTED")
if None != re.search('/AION/', self.path):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
if ctype == 'application/json':
if self.headers.get("Authorization") == None:
self.do_AUTHHEAD()
resp = "Authentication Failed: Auth Header Not Present"
resp=resp.encode()
self.wfile.write(resp)
elif self.headers.get("Authorization") == "Basic " + self._auth:
length = int(self.headers.get('content-length'))
#data = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1)
data = self.rfile.read(length)
#print(data)
#keyList = list(data.keys())
#print(keyList[0])
model = self.path.split('/')[-2]
operation = self.path.split('/')[-1]
home = expanduser("~")
#data = json.loads(data)
dataStr = data
model_path = os.path.join(DEPLOY_LOCATION,model)
isdir = os.path.isdir(model_path)
if isdir:
if operation.lower() == 'predict':
predict_path = os.path.join(model_path,'aion_predict.py')
outputStr = subprocess.check_output([sys.executable,predict_path,dataStr])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
resp = outputStr
elif operation.lower() == 'spredict':
try:
predict_path = os.path.join(model_path,'aion_spredict.py')
print(predict_path)
outputStr = subprocess.check_output([sys.executable,predict_path,dataStr])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
resp = outputStr
except Exception as e:
print(e)
elif operation.lower() == 'features':
predict_path = os.path.join(model_path,'featureslist.py')
outputStr = subprocess.check_output([sys.executable,predict_path,dataStr])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
resp = outputStr
elif operation.lower() == 'explain':
predict_path = os.path.join(model_path,'explainable_ai.py')
outputStr = subprocess.check_output([sys.executable,predict_path,'local',dataStr])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'aion_ai_explanation:(.*)',str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
elif operation.lower() == 'monitoring':
predict_path = os.path.join(model_path,'aion_ipdrift.py')
outputStr = subprocess.check_output([sys.executable,predict_path,dataStr])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'drift:(.*)',str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
elif operation.lower() == 'performance':
predict_path = os.path.join(model_path,'aion_opdrift.py')
outputStr = subprocess.check_output([sys.executable,predict_path,dataStr])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'drift:(.*)',str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
elif operation.lower() == 'pattern_anomaly_predict':
data = json.loads(data)
anomaly = False
remarks = ''
clusterid = -1
configfilename = os.path.join(model_path,'datadetails.json')
filename = os.path.join(model_path,'clickstream.json')
clusterfilename = os.path.join(model_path,'stateClustering.csv')
probfilename = os.path.join(model_path,'stateTransitionProbability.csv')
dfclus = pd.read_csv(clusterfilename)
dfprod = pd.read_csv(probfilename)
f = open(configfilename, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
activity = configSettingsJson['activity']
sessionid = configSettingsJson['sessionid']
f = open(filename, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
groupswitching = configSettingsJson['groupswitching']
page_threshold = configSettingsJson['transitionprobability']
chain_count = configSettingsJson['transitionsequence']
chain_probability = configSettingsJson['sequencethreshold']
currentactivity = data[activity]
if bool(user_records):
sessionid = data[sessionid]
if sessionid != user_records['SessionID']:
user_records['SessionID'] = sessionid
prevactivity = ''
user_records['probarry'] = []
user_records['prevclusterid'] = -1
user_records['NoOfClusterHopping'] = 0
user_records['pageclicks'] = 1
else:
prevactivity = user_records['Activity']
user_records['Activity'] = currentactivity
pageswitch = True
if prevactivity == currentactivity or prevactivity == '':
probability = 0
pageswitch = False
remarks = ''
else:
user_records['pageclicks'] += 1
df1 = dfprod[(dfprod['State'] == prevactivity) & (dfprod['NextState'] == currentactivity)]
if df1.empty:
remarks = 'Anomaly Detected - User in unusual state'
anomaly = True
clusterid = -1
probability = 0
user_records['probarry'].append(probability)
n=int(chain_count)
num_list = user_records['probarry'][-n:]
davg = sum(num_list)/len(num_list)
for index, row in dfclus.iterrows():
clusterlist = row["clusterlist"]
if currentactivity in clusterlist:
clusterid = row["clusterid"]
else:
probability = df1['Probability'].iloc[0]
user_records['probarry'].append(probability)
n=int(chain_count)
num_list = user_records['probarry'][-n:]
davg = sum(num_list)/len(num_list)
for index, row in dfclus.iterrows():
clusterlist = row["clusterlist"]
if currentactivity in clusterlist:
clusterid = row["clusterid"]
remarks = ''
if user_records['prevclusterid'] != -1:
if probability == 0 and user_records['prevclusterid'] != clusterid:
user_records['NoOfClusterHopping'] = user_records['NoOfClusterHopping']+1
if user_records['pageclicks'] == 1:
remarks = 'Anomaly Detected - Frequent Cluster Hopping'
anomaly = True
else:
remarks = 'Cluster Hopping Detected'
user_records['pageclicks'] = 0
if user_records['NoOfClusterHopping'] > int(groupswitching) and anomaly == False:
remarks = 'Anomaly Detected - Multiple Cluster Hopping'
anomaly = True
elif probability == 0:
remarks = 'Anomaly Detected - Unusual State Transition Detected'
anomaly = True
elif probability <= float(page_threshold):
remarks = 'Anomaly Detected - In-frequent State Transition Detected'
anomaly = True
else:
if pageswitch == True:
if probability == 0:
remarks = 'Anomaly Detected - Unusual State Transition Detected'
anomaly = True
elif probability <= float(page_threshold):
remarks = 'Anomaly Detected - In-frequent State Transition Detected'
anomaly = True
else:
remarks = ''
if davg < float(chain_probability):
if anomaly == False:
remarks = 'Anomaly Detected - In-frequent Pattern Detected'
anomaly = True
else:
user_records['SessionID'] = data[sessionid]
user_records['Activity'] = data[activity]
user_records['probability'] = 0
user_records['probarry'] = []
user_records['chainprobability'] = 0
user_records['prevclusterid'] = -1
user_records['NoOfClusterHopping'] = 0
user_records['pageclicks'] = 1
for index, row in dfclus.iterrows():
clusterlist = row["clusterlist"]
if currentactivity in clusterlist:
clusterid = row["clusterid"]
user_records['prevclusterid'] = clusterid
outputStr = '{"status":"SUCCESS","data":{"Anomaly":"'+str(anomaly)+'","Remarks":"'+str(remarks)+'"}}'
elif operation.lower() == 'pattern_anomaly_settings':
data = json.loads(data)
groupswitching = data['groupswitching']
transitionprobability = data['transitionprobability']
transitionsequence = data['transitionsequence']
sequencethreshold = data['sequencethreshold']
filename = os.path.join(model_path,'clickstream.json')
data = {}
data['groupswitching'] = groupswitching
data['transitionprobability'] = transitionprobability
data['transitionsequence'] = transitionsequence
data['sequencethreshold'] = sequencethreshold
updatedConfig = json.dumps(data)
with open(filename, "w") as fpWrite:
fpWrite.write(updatedConfig)
fpWrite.close()
outputStr = '{"Status":"SUCCESS"}'
else:
outputStr = "{'Status':'Error','Msg':'Operation not supported'}"
else:
outputStr = "{'Status':'Error','Msg':'Model Not Present'}"
resp = outputStr
resp=resp+"\\n"
resp=resp.encode()
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(resp)
else:
self.do_AUTHHEAD()
self.wfile.write(self.headers.get("Authorization").encode())
resp = "Authentication Failed"
resp=resp.encode()
self.wfile.write(resp)
else:
print("python ==> else1")
self.send_response(403)
self.send_header('Content-Type', 'application/json')
self.end_headers()
print("PYTHON ######## REQUEST ####### ENDED")
return
def getModelFeatures(self,modelSignature):
datajson = {'Body':'Gives the list of features'}
home = expanduser("~")
if platform.system() == 'Windows':
predict_path = os.path.join(home,'AppData','Local','HCLT','AION','target',modelSignature,'featureslist.py')
else:
predict_path = os.path.join(home,'HCLT','AION','target',modelSignature,'featureslist.py')
if(os.path.isfile(predict_path)):
outputStr = subprocess.check_output([sys.executable,predict_path])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'features:(.*)',str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
displaymsg = outputStr
#displaymsg = json.dumps(displaymsg)
return(True,displaymsg)
else:
displaymsg = "{'status':'ERROR','msg':'Unable to fetch features'}"
return(False,displaymsg)
def getFeatures(self,modelSignature):
datajson = {'Body':'Gives the list of features'}
urltext = '/AION/UseCase_Version/features'
if modelSignature != '':
status, displaymsg = self.getModelFeatures(modelSignature)
if status:
urltext = '/AION/'+modelSignature+'/features'
else:
displaymsg = json.dumps(datajson)
else:
displaymsg = json.dumps(datajson)
msg="""
URL:{url}
RequestType: POST
Content-Type=application/json
Output: {displaymsg}.
""".format(url=urltext,displaymsg=displaymsg)
return(msg)
def features_help(self,modelSignature):
home = expanduser("~")
if platform.system() == 'Windows':
display_path = os.path.join(home,'AppData','Local','HCLT','AION','target',modelSignature,'display.json')
else:
display_path = os.path.join(home,'HCLT','AION','target',modelSignature,'display.json')
#display_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'target',model,'display.json')
datajson = {'Body':'Data Should be in JSON Format'}
if(os.path.isfile(display_path)):
with open(display_path) as file:
config = json.load(file)
file.close()
datajson={}
for feature in config['numericalFeatures']:
if feature != config['targetFeature']:
datajson[feature] = 'Numeric Value'
for feature in config['nonNumericFeatures']:
if feature != config['targetFeature']:
datajson[feature] = 'Category Value'
for feature in config['textFeatures']:
if feature != config['targetFeature']:
datajson[feature] = 'Category Value'
displaymsg = json.dumps(datajson)
return(displaymsg)
def predict_help(self,modelSignature):
if modelSignature != '':
displaymsg = self.features_help(modelSignature)
urltext = '/AION/'+modelSignature+'/predict'
else:
datajson = {'Body':'Data Should be in JSON Format'}
displaymsg = json.dumps(datajson)
urltext = '/AION/UseCase_Version/predict'
msg="""
URL:{url}
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
Output: prediction,probability(if Applicable),remarks corresponding to each row.
""".format(url=urltext,displaymsg=displaymsg)
return(msg)
def performance_help(self,modelSignature):
if modelSignature != '':
urltext = '/AION/'+modelSignature+'/performance'
else:
urltext = '/AION/UseCase_Version/performance'
datajson = {"trainingDataLocation":"Reference Data File Path","currentDataLocation":"Latest Data File Path"}
displaymsg = json.dumps(datajson)
msg="""
URL:{url}
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
Output: HTML File Path.""".format(url=urltext,displaymsg=displaymsg)
return(msg)
def monitoring_help(self,modelSignature):
if modelSignature != '':
urltext = '/AION/'+modelSignature+'/monitoring'
else:
urltext = '/AION/UseCase_Version/monitoring'
datajson = {"trainingDataLocation":"Reference Data File Path","currentDataLocation":"Latest Data File Path"}
displaymsg = json.dumps(datajson)
msg="""
URL:{url}
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
Output: Affected Columns. HTML File Path.""".format(url=urltext,displaymsg=displaymsg)
return(msg)
def explain_help(self,modelSignature):
if modelSignature != '':
displaymsg = self.features_help(modelSignature)
urltext = '/AION/'+modelSignature+'/explain'
else:
datajson = {'Body':'Data Should be in JSON Format'}
displaymsg = json.dumps(datajson)
urltext = '/AION/UseCase_Version/explain'
msg="""
URL:{url}
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
Output: anchor (Local Explanation),prediction,forceplot,multidecisionplot.""".format(url=urltext,displaymsg=displaymsg)
return(msg)
def help_text(self,modelSignature):
predict_help = self.predict_help(modelSignature)
explain_help = self.explain_help(modelSignature)
features_help = self.getFeatures(modelSignature)
monitoring_help = self.monitoring_help(modelSignature)
performance_help = self.performance_help(modelSignature)
msg="""
Following URL:
Prediction
{predict_help}
Local Explanation
{explain_help}
Features
{features_help}
Monitoring
{monitoring_help}
Performance
{performance_help}
""".format(predict_help=predict_help,explain_help=explain_help,features_help=features_help,monitoring_help=monitoring_help,performance_help=performance_help)
return msg
def do_GET(self):
print("PYTHON ######## REQUEST ####### STARTED")
if None != re.search('/AION/', self.path):
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
helplist = self.path.split('/')[-1]
print(helplist)
if helplist.lower() == 'help':
model = self.path.split('/')[-2]
if model.lower() == 'aion':
model =''
msg = self.help_text(model)
elif helplist.lower() == 'predict':
model = self.path.split('/')[-2]
if model.lower() == 'aion':
model =''
msg = self.predict_help(model)
elif helplist.lower() == 'explain':
model = self.path.split('/')[-2]
if model.lower() == 'aion':
model =''
msg = self.explain_help(model)
elif helplist.lower() == 'monitoring':
model = self.path.split('/')[-2]
if model.lower() == 'aion':
model =''
msg = self.monitoring_help(model)
elif helplist.lower() == 'performance':
model = self.path.split('/')[-2]
if model.lower() == 'aion':
model =''
msg = self.performance_help(model)
elif helplist.lower() == 'features':
model = self.path.split('/')[-2]
if model.lower() == 'aion':
model =''
status,msg = self.getModelFeatures(model)
else:
model = self.path.split('/')[-2]
if model.lower() == 'aion':
model =helplist
msg = self.help_text(model)
self.wfile.write(msg.encode())
else:
self.send_response(403)
self.send_header('Content-Type', 'application/json')
self.end_headers()
return
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
allow_reuse_address = True
def shutdown(self):
self.socket.close()
HTTPServer.shutdown(self)
class SimpleHttpServer():
def __init__(self, ip, port,username,password):
handler_class = partial(HTTPRequestHandler,username=username,password=password,)
self.server = ThreadedHTTPServer((ip,port), handler_class)
def start(self):
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
def waitForThread(self):
self.server_thread.join()
def stop(self):
self.server.shutdown()
self.waitForThread()
def start_server(ip,port,username,password):
server = SimpleHttpServer(ip,int(port),username,password)
print('HTTP Server Running...........')
server.start()
server.waitForThread()
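# --- Illustrative client sketch (not part of the original module; usecase name, credentials and payload are placeholders) ---
# Shows what a prediction request against the /AION/<usecase>/predict endpoint
# served above could look like, using HTTP basic auth as expected by do_POST().
def _example_predict_request(ip='127.0.0.1', port=8091, usecase='usecase_1',
                             username='user', password='pass'):
    import requests  # assumed to be available on the client side
    url = f'http://{ip}:{port}/AION/{usecase}/predict'
    payload = [{"feature1": 1.0, "feature2": "A"}]
    resp = requests.post(url, json=payload, auth=(username, password), timeout=30)
    return resp.text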
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import os
from pathlib import Path
os.chdir(Path(__file__).parent)
import json
import shutil
from mlac.timeseries import app as ts_app
from mlac.ml import app as ml_app
import traceback
def create_test_file(config):
code_file = 'aionCode.py'
text = """
from pathlib import Path
import subprocess
import sys
import json
import argparse
def run_pipeline(data_path):
print('Data Location:', data_path)
cwd = Path(__file__).parent
monitor_file = str(cwd/'ModelMonitoring'/'{code_file}')
load_file = str(cwd/'DataIngestion'/'{code_file}')
transformer_file = str(cwd/'DataTransformation'/'{code_file}')
selector_file = str(cwd/'FeatureEngineering'/'{code_file}')
train_folder = cwd
register_file = str(cwd/'ModelRegistry'/'{code_file}')
deploy_file = str(cwd/'ModelServing'/'{code_file}')
print('Running modelMonitoring')
cmd = [sys.executable, monitor_file, '-i', data_path]
result = subprocess.check_output(cmd)
result = result.decode('utf-8')
print(result)
result = json.loads(result[result.find('{search}'):])
if result['Status'] == 'Failure':
exit()
print('Running dataIngestion')
cmd = [sys.executable, load_file]
result = subprocess.check_output(cmd)
result = result.decode('utf-8')
print(result)
result = json.loads(result[result.find('{search}'):])
if result['Status'] == 'Failure':
exit()
print('Running DataTransformation')
cmd = [sys.executable, transformer_file]
result = subprocess.check_output(cmd)
result = result.decode('utf-8')
print(result)
result = json.loads(result[result.find('{search}'):])
if result['Status'] == 'Failure':
exit()
print('Running FeatureEngineering')
cmd = [sys.executable, selector_file]
result = subprocess.check_output(cmd)
result = result.decode('utf-8')
print(result)
result = json.loads(result[result.find('{search}'):])
if result['Status'] == 'Failure':
exit()
train_models = [f for f in train_folder.iterdir() if 'ModelTraining' in f.name]
for model in train_models:
print('Running',model.name)
cmd = [sys.executable, str(model/'{code_file}')]
train_result = subprocess.check_output(cmd)
train_result = train_result.decode('utf-8')
print(train_result)
print('Running ModelRegistry')
cmd = [sys.executable, register_file]
result = subprocess.check_output(cmd)
result = result.decode('utf-8')
print(result)
result = json.loads(result[result.find('{search}'):])
if result['Status'] == 'Failure':
exit()
print('Running ModelServing')
cmd = [sys.executable, deploy_file]
result = subprocess.check_output(cmd)
result = result.decode('utf-8')
print(result)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--inputPath', help='path of the input data')
args = parser.parse_args()
if args.inputPath:
filename = args.inputPath
else:
filename = r"{filename}"
try:
print(run_pipeline(filename))
except Exception as e:
print(e)
""".format(filename=config['dataLocation'],search='{"Status":',code_file=code_file)
deploy_path = Path(config["deploy_path"])/'MLaC'
deploy_path.mkdir(parents=True, exist_ok=True)
py_file = deploy_path/"run_pipeline.py"
with open(py_file, "w") as f:
f.write(text)
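# --- Illustrative sketch: the two config keys create_test_file() actually reads (values are placeholders) ---
def _example_pipeline_config():
    return {
        "dataLocation": "C:/data/input.csv",   # written into run_pipeline.py as the default input
        "deploy_path": "C:/aion/usecase_1",    # run_pipeline.py is emitted under <deploy_path>/MLaC
    }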
def is_module_in_req_file(mod, folder):
status = False
if (Path(folder)/'requirements.txt').is_file():
with open(folder/'requirements.txt', 'r') as f:
status = mod in f.read()
return status
def copy_local_modules(config):
deploy_path = Path(config["deploy_path"])
local_modules_location = config.get("local_modules_location", None)
if local_modules_location:
folder_loc = local_modules_location
else:
folder_loc = Path(__file__).parent/'local_modules'
if not folder_loc.exists():
folder_loc = None
if folder_loc:
file = folder_loc/'config.json'
if file.exists():
with open(file, 'r') as f:
data = json.load(f)
for key, values in data.items():
local_module = folder_loc/key
if local_module.exists():
for folder in values:
target_folder = Path(deploy_path)/'MLaC'/folder
if target_folder.is_dir():
if is_module_in_req_file(key, target_folder):
shutil.copy(local_module, target_folder)
def validate(config):
error = ''
if 'error' in config.keys():
error = config['error']
return error
def generate_mlac_code(config):
with open(config, 'r') as f:
config = json.load(f)
error = validate(config)
if error:
raise ValueError(error)
if config['problem_type'] in ['classification','regression']:
return generate_mlac_ML_code(config)
elif config['problem_type'].lower() == 'timeseriesforecasting': #task 11997
return generate_mlac_TS_code(config)
def generate_mlac_ML_code(config):
try:
ml_app.run_loader(config)
ml_app.run_transformer(config)
ml_app.run_selector(config)
ml_app.run_trainer(config)
ml_app.run_register(config)
ml_app.run_deploy(config)
ml_app.run_drift_analysis(config)
copy_local_modules(config)
create_test_file(config)
status = {'Status':'SUCCESS','MLaC_Location':str(Path(config["deploy_path"])/'MLaC')}
except Exception as Inst:
status = {'Status':'Failure','msg':str(Inst)}
traceback.print_exc()
status = json.dumps(status)
return(status)
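# --- Illustrative usage sketch (the config path is a placeholder) ---
# generate_mlac_code() expects the path of a JSON config carrying at least
# 'problem_type' and 'deploy_path'; it returns a JSON status string.
def _example_generate_mlac(config_path="C:/aion/usecase_1/etc/code_config.json"):
    status = json.loads(generate_mlac_code(config_path))
    return status.get('Status'), status.get('MLaC_Location', status.get('msg'))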
def generate_mlac_TS_code(config):
try:
ts_app.run_loader(config)
ts_app.run_transformer(config)
ts_app.run_selector(config)
ts_app.run_trainer(config)
ts_app.run_register(config)
ts_app.run_deploy(config)
ts_app.run_drift_analysis(config)
create_test_file(config)
status = {'Status':'SUCCESS','MLaC_Location':str(Path(config["deploy_path"])/'MLaC')}
except Exception as Inst:
status = {'Status':'Failure','msg':str(Inst)}
traceback.print_exc()
status = json.dumps(status)
return(status)
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import joblib
import time
import pandas as pd
import numpy as np
import argparse
import json
import os
import pathlib
from pathlib import Path
from uncertainties.uq_main import aionUQ
from datetime import datetime
from os.path import expanduser
import platform
import logging
class run_uq:
def __init__(self,modelfeatures,modelFile,csvFile,target):
self.modelfeatures=modelfeatures
self.modelFile=modelFile
self.csvFile=csvFile
self.target=target
##UQ classification fn
def getUQclassification(self,model,ProblemName,Params):
df = pd.read_csv(self.csvFile)
# # object_cols = [col for col, col_type in df.dtypes.iteritems() if col_type == 'object'] -- Fix for python 3.8.11 update (in 2.9.0.8)
object_cols = [col for col, col_type in zip(df.columns,df.dtypes) if col_type == 'object']
df = df.drop(object_cols, axis=1)
df = df.dropna(axis=1)
df = df.reset_index(drop=True)
modelfeatures = self.modelfeatures
#tar = args.target
# target = df[tar]
y=df[self.target].values
y = y.flatten()
X = df.drop(self.target, axis=1)
try:
uqObj=aionUQ(df,X,y,ProblemName,Params,model,modelfeatures,self.target)
accuracy,uq_ece,output_jsonobject=uqObj.uqMain_BBMClassification()
except Exception as e:
print("uq error",e)
raise
# print("UQ Classification: \\n",output_jsonobject)
# print(accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertaitny_per)
#print(output_jsonobject)
return accuracy,uq_ece,output_jsonobject
##UQ regression fn
def getUQregression(self,model,ProblemName,Params):
df = pd.read_csv(self.csvFile)
modelfeatures = self.modelfeatures
dfp = df[modelfeatures]
tar = self.target
target = df[tar]
uqObj=aionUQ(df,dfp,target,ProblemName,Params,model,modelfeatures,tar)
total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression()
return total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject
def uqMain(self,model):
#print("inside uq main.\\n")
reg_status=""
class_status=""
algorithm_status=""
try:
model=model
if Path(self.modelFile).is_file():
ProblemName = model.__class__.__name__
if ProblemName in ['LogisticRegression','SGDClassifier','SVC','DecisionTreeClassifier','RandomForestClassifier','GaussianNB','KNeighborsClassifier','GradientBoostingClassifier']:
Problemtype = 'Classification'
elif ProblemName in ['LinearRegression','Lasso','Ridge','DecisionTreeRegressor','RandomForestRegressor']:
Problemtype = 'Regression'
else:
Problemtype = "None"
if Problemtype.lower() == 'classification':
try:
Params = model.get_params()
accuracy,uq_ece,output = self.getUQclassification(model,ProblemName,Params)
class_status="SUCCESS"
#print(output)
except Exception as e:
print(e)
class_status="FAILED"
output = {'Problem':'None','msg':str(e)}
output = json.dumps(output)
elif Problemtype.lower() == 'regression' :
try:
Params = model.get_params()
total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,output = self.getUQregression(model,ProblemName,Params)
#print(uq_jsonobject)
reg_status="SUCCESS"
except Exception as e:
output = {'Problem':'None','msg':str(e)}
output = json.dumps(output)
reg_status="FAILED"
else:
try:
output={}
output['Problem']="None"
output['msg']="Uncertainty Quantification not supported for this algorithm."
output = json.dumps(output)
algorithm_status="FAILED"
except:
algorithm_status="FAILED"
except Exception as e:
print(e)
reg_status="FAILED"
class_status="FAILED"
algorithm_status="FAILED"
output = {'Problem':'None','msg':str(e)}
output = json.dumps(output)
return class_status,reg_status,algorithm_status,output
def aion_uq(modelFile,dataFile,features,targetfeatures):
try:
from appbe.dataPath import DEPLOY_LOCATION
uqLogLocation = os.path.join(DEPLOY_LOCATION,'logs')
try:
os.makedirs(uqLogLocation)
except OSError as e:
if (os.path.exists(uqLogLocation)):
pass
else:
raise OSError('uqLogLocation error.')
filename_uq = 'uqlog_'+str(int(time.time()))
filename_uq=filename_uq+'.log'
filepath = os.path.join(uqLogLocation, filename_uq)
print(filepath)
logging.basicConfig(filename=filepath, format='%(message)s',filemode='w')
log = logging.getLogger('aionUQ')
log.setLevel(logging.INFO)
log.info('************* Version - v1.7.0 *************** \\n')
if isinstance(features, list):
modelfeatures = features
else:
if ',' in features:
modelfeatures = [x.strip() for x in features.split(',')]
else:
modelfeatures = features.split(',')
model = joblib.load(modelFile)
uqobj = run_uq(modelfeatures,modelFile,dataFile,targetfeatures)
class_status,reg_status,algorithm_status,output=uqobj.uqMain(model)
if (class_status.lower() == 'failed'):
log.info('uq classification failed.\\n')
elif (class_status.lower() == 'success'):
log.info('uq classification success.\\n')
else:
log.info('uq classification not used.\\n')
if (reg_status.lower() == 'failed'):
log.info('uq regression failed.\\n')
elif (reg_status.lower() == 'success'):
log.info('uq regression success.\\n')
else:
log.info('uq regression not used.\\n')
if (algorithm_status.lower() == 'failed'):
log.info('Problem type issue: UQ only supports classification and regression, and the selected algorithm may not be supported by Uncertainty Quantification currently.\\n')
except Exception as e:
log.info('uq test failed.\\n'+str(e))
#print(e)
output = {'Problem':'None','msg':str(e)}
output = json.dumps(output)
return(output)
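# --- Illustrative usage sketch (every path and feature name below is a placeholder) ---
def _example_uq_call():
    """Illustrative only: runs UQ on a previously trained scikit-learn model."""
    return aion_uq(modelFile="C:/models/model.sav",
                   dataFile="C:/data/test.csv",
                   features=["feature1", "feature2"],
                   targetfeatures="target")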
#Sagemaker main fn call
if __name__=='__main__':
try:
parser = argparse.ArgumentParser()
parser.add_argument('savFile')
parser.add_argument('csvFile')
parser.add_argument('features')
parser.add_argument('target')
args = parser.parse_args()
home = expanduser("~")
if platform.system() == 'Windows':
uqLogLocation = os.path.join(home,'AppData','Local','HCLT','AION','uqLogs')
else:
uqLogLocation = os.path.join(home,'HCLT','AION','uqLogs')
try:
os.makedirs(uqLogLocation)
except OSError as e:
if (os.path.exists(uqLogLocation)):
pass
else:
raise OSError('uqLogLocation error.')
# self.sagemakerLogLocation=str(sagemakerLogLocation)
filename_uq = 'uqlog_'+str(int(time.time()))
filename_uq=filename_uq+'.log'
# filename = 'mlopsLog_'+Time()
filepath = os.path.join(uqLogLocation, filename_uq)
logging.basicConfig(filename=filepath, format='%(message)s',filemode='w')
log = logging.getLogger('aionUQ')
log.setLevel(logging.DEBUG)
if ',' in args.features:
args.features = [x.strip() for x in args.features.split(',')]
else:
args.features = args.features.split(',')
modelFile = args.savFile
modelfeatures = args.features
csvFile = args.csvFile
target=args.target
model = joblib.load(args.savFile)
##Main uq function call
uqobj = run_uq(modelfeatures,modelFile,csvFile,target)
class_status,reg_status,algorithm_status,output=uqobj.uqMain(model)
if (class_status.lower() == 'failed'):
log.info('uq classification failed.\\n')
elif (class_status.lower() == 'success'):
log.info('uq classification success.\\n')
else:
log.info('uq classification not used.\\n')
if (reg_status.lower() == 'failed'):
log.info('uq regression failed.\\n')
elif (reg_status.lower() == 'success'):
log.info('uq regression success.\\n')
else:
log.info('uq regression not used.\\n')
if (algorithm_status.lower() == 'failed'):
msg = 'Uncertainty Quantification not supported for this algorithm'
log.info('Algorithm not supported by Uncertainty Quantification.\\n')
output = {'Problem':'None','msg':str(msg)}
output = json.dumps(output)
except Exception as e:
log.info('uq test failed.\\n'+str(e))
output = {'Problem':'None','msg':str(e)}
output = json.dumps(output)
#print(e)
print(output)
<s> import json
import logging
import os
import shutil
import time
import sys
from sys import platform
from distutils.util import strtobool
from config_manager.pipeline_config import AionConfigManager
from summarizer import Summarizer
# Base class for the EION configuration manager: reads the required parameters from eion.json, initializes the parameter list, reads the respective params, stores them in variables and returns them to the caller function or external modules.
class AionTextManager:
def __init__(self):
self.log = logging.getLogger('eion')
self.data = ''
self.problemType = ''
self.basic = []
self.advance=[]
def readTextfile(self,dataPath):
#dataPath=self.[baisc][]
with open(dataPath, "r") as file:
data = file.read()
return data
#print(data)
def generateSummary(self,data,algo,stype):
bert_model = Summarizer()
if stype == "large":
bert_summary = ''.join(bert_model(data, min_length=300))
return(bert_summary)
elif stype == "medium":
bert_summary = ''.join(bert_model(data, min_length=150))
return(bert_summary)
elif stype == "small":
bert_summary = ''.join(bert_model(data, min_length=60))
return(bert_summary)
def aion_textsummary(arg):
Obj = AionTextManager()
configObj = AionConfigManager()
readConfistatus,msg = configObj.readConfigurationFile(arg)
dataPath = configObj.getTextlocation()
text_data = Obj.readTextfile(dataPath)
getAlgo, getMethod = configObj.getTextSummarize()
summarize = Obj.generateSummary(text_data, getAlgo, getMethod)
output = {'status':'Success','summary':summarize}
output_json = json.dumps(output)
return(output_json)
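# --- Illustrative usage sketch (the text file path is a placeholder) ---
# AionTextManager.generateSummary() maps stype to a minimum summary length
# (large=300, medium=150, small=60) and currently uses the BERT summarizer for
# every value of algo.
def _example_summary_call(text_path="C:/data/article.txt"):
    mgr = AionTextManager()
    text = mgr.readTextfile(text_path)
    return mgr.generateSummary(text, algo='bert', stype='small')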
if __name__ == "__main__":
aion_textsummary(sys.argv[1])
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import sys
import json
import datetime, time, timeit
import logging
logging.getLogger('tensorflow').disabled = True
import shutil
import warnings
from config_manager.online_pipeline_config import OTAionConfigManager
from records import pushrecords
import logging
import mlflow
from pathlib import Path
from pytz import timezone
def pushRecordForOnlineTraining():
try:
from appbe.pages import getversion
status,msg = pushrecords.enterRecord(AION_VERSION)
except Exception as e:
print("Exception", e)
status = False
msg = str(e)
return status,msg
def mlflowSetPath(path,experimentname):
import mlflow
url = "file:" + str(Path(path).parent.parent) + "/mlruns"
mlflow.set_tracking_uri(url)
mlflow.set_experiment(str(experimentname))
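# --- Illustrative note (the deploy path below is a placeholder) ---
# mlflowSetPath() stores runs in a file-based tracking store two directory levels
# above the deploy location (i.e. <deploy root>/mlruns) and reuses or creates an
# experiment named "<usecase>_<version>", e.g.:
#   mlflowSetPath("C:/aion/target/usecase_1/1", "usecase_1_1")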
class server():
def __init__(self):
self.response = None
self.dfNumCols=0
self.dfNumRows=0
self.features=[]
self.mFeatures=[]
self.emptyFeatures=[]
self.vectorizerFeatures=[]
self.wordToNumericFeatures=[]
self.profilerAction = []
self.targetType = ''
self.matrix1='{'
self.matrix2='{'
self.matrix='{'
self.trainmatrix='{'
self.numericalFeatures=[]
self.nonNumericFeatures=[]
self.similarGroups=[]
self.method = 'NA'
self.pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
self.modelSelTopFeatures=[]
self.topFeatures=[]
self.allFeatures=[]
def startScriptExecution(self, config_obj):
rowfilterexpression = ''
grouperbyjson = ''
model_tried=''
learner_type = ''
topics = {}
numericContinuousFeatures=''
discreteFeatures=''
threshold=-1
targetColumn = ''
categoricalFeatures=''
dataFolderLocation = ''
original_data_file = ''
profiled_data_file = ''
trained_data_file = ''
predicted_data_file=''
featureReduction = 'False'
reduction_data_file=''
params={}
score = 0
labelMaps={}
featureDataShape=[]
self.riverModels = []
self.riverAlgoNames = ['Online Logistic Regression', 'Online Softmax Regression', 'Online Decision Tree Classifier', 'Online KNN Classifier', 'Online Linear Regression', 'Online Bayesian Linear Regression', 'Online Decision Tree Regressor','Online KNN Regressor']
#ConfigSettings
iterName,iterVersion,dataLocation,deployLocation,delimiter,textqualifier = config_obj.getAIONLocationSettings()
scoreParam = config_obj.getScoringCreteria()
datetimeFeature,indexFeature,modelFeatures=config_obj.getFeatures()
iterName = iterName.replace(" ", "_")
deployLocation,dataFolderLocation,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,logFileName,outputjsonFile = config_obj.createDeploymentFolders(deployLocation,iterName,iterVersion)
#Mlflow
mlflowSetPath(deployLocation,iterName+'_'+iterVersion)
#Logger
filehandler = logging.FileHandler(logFileName, 'w','utf-8')
formatter = logging.Formatter('%(message)s')
filehandler.setFormatter(formatter)
log = logging.getLogger('eion')
log.propagate = False
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
log.removeHandler(hdlr)
log.addHandler(filehandler)
log.setLevel(logging.INFO)
log.info('************* Version - v2.2.5 *************** \\n')
msg = '-------> Execution Start Time: '+ datetime.datetime.now(timezone("Asia/Kolkata")).strftime('%Y-%m-%d %H:%M:%S' + ' IST')
log.info(msg)
startTime = timeit.default_timer()
try:
output = {'bestModel': '', 'bestScore': 0, 'bestParams': {}}
#ConfigSetting
problemType,targetFeature,profilerStatus,selectorStatus,learnerStatus,visualizationstatus,deployStatus = config_obj.getModulesDetails()
selectorStatus = False
if(problemType.lower() in ['classification','regression']):
if(targetFeature == ''):
output = {"status":"FAIL","message":"Target Feature is Must for Classification and Regression Problem Type"}
return output
#DataReading
from transformations.dataReader import dataReader
objData = dataReader()
if os.path.isfile(dataLocation):
dataFrame = objData.csvTodf(dataLocation,delimiter,textqualifier)
dataFrame.rename(columns=lambda x:x.strip(), inplace=True)
#FilterDataframe
filter = config_obj.getfilter()
if filter != 'NA':
dataFrame,rowfilterexpression = objData.rowsfilter(filter,dataFrame)
#GroupDataframe
timegrouper = config_obj.gettimegrouper()
grouping = config_obj.getgrouper()
if grouping != 'NA':
dataFrame,grouperbyjson = objData.grouping(grouping,dataFrame)
elif timegrouper != 'NA':
dataFrame,grouperbyjson = objData.timeGrouping(timegrouper,dataFrame)
#KeepOnlyModelFtrs
dataFrame = objData.removeFeatures(dataFrame,datetimeFeature,indexFeature,modelFeatures,targetFeature)
log.info('\\n-------> First Ten Rows of Input Data: ')
log.info(dataFrame.head(10))
self.dfNumRows=dataFrame.shape[0]
self.dfNumCols=dataFrame.shape[1]
dataLoadTime = timeit.default_timer() - startTime
log.info('-------> COMPUTING: Total dataLoadTime time(sec) :'+str(dataLoadTime))
if profilerStatus:
log.info('\\n================== Data Profiler has started ==================')
log.info('Status:-|... AION feature transformation started')
dp_mlstart = time.time()
profilerJson = config_obj.getEionProfilerConfigurarion()
log.info('-------> Input dataFrame(5 Rows): ')
log.info(dataFrame.head(5))
log.info('-------> DataFrame Shape (Row,Columns): '+str(dataFrame.shape))
from incremental.incProfiler import incProfiler
incProfilerObj = incProfiler()
dataFrame,targetColumn,self.mFeatures,self.numericalFeatures,self.nonNumericFeatures,labelMaps,self.configDict,self.textFeatures,self.emptyFeatures,self.wordToNumericFeatures = incProfilerObj.startIncProfiler(dataFrame,profilerJson,targetFeature,deployLocation,problemType)
self.features = self.configDict['allFtrs']
log.info('-------> Data Frame Post Data Profiling(5 Rows): ')
log.info(dataFrame.head(5))
log.info('Status:-|... AION feature transformation completed')
dp_mlexecutionTime=time.time() - dp_mlstart
log.info('-------> COMPUTING: Total Data Profiling Execution Time '+str(dp_mlexecutionTime))
log.info('================== Data Profiling completed ==================\\n')
dataFrame.to_csv(profiled_data_file,index=False)
selectorStatus = False
if learnerStatus:
log.info('Status:-|... AION Learner data preparation started')
ldp_mlstart = time.time()
testPercentage = config_obj.getAIONTestTrainPercentage()
balancingMethod = config_obj.getAIONDataBalancingMethod()
from learner.machinelearning import machinelearning
mlobj = machinelearning()
modelType = problemType.lower()
targetColumn = targetFeature
if modelType == "na":
if self.targetType == 'categorical':
modelType = 'classification'
elif self.targetType == 'continuous':
modelType = 'regression'
datacolumns=list(dataFrame.columns)
if targetColumn in datacolumns:
datacolumns.remove(targetColumn)
features =datacolumns
featureData = dataFrame[features]
if targetColumn != '':
targetData = dataFrame[targetColumn]
xtrain,ytrain,xtest,ytest = mlobj.split_into_train_test_data(featureData,targetData,testPercentage,modelType)
categoryCountList = []
if modelType == 'classification':
if(mlobj.checkForClassBalancing(ytrain) >= 1):
xtrain,ytrain = mlobj.ExecuteClassBalancing(xtrain,ytrain,balancingMethod)
valueCount=targetData.value_counts()
categoryCountList=valueCount.tolist()
ldp_mlexecutionTime=time.time() - ldp_mlstart
log.info('-------> COMPUTING: Total Learner data preparation Execution Time '+str(ldp_mlexecutionTime))
log.info('Status:-|... AION Learner data preparation completed')
if learnerStatus:
log.info('\\n================== ML Started ==================')
log.info('Status:-|... AION training started')
log.info('-------> Memory Usage by DataFrame During Learner Status '+str(dataFrame.memory_usage(deep=True).sum()))
mlstart = time.time()
log.info('-------> Target Problem Type:'+ self.targetType)
learner_type = 'ML'
learnerJson = config_obj.getEionLearnerConfiguration()
log.info('-------> Target Model Type:'+ modelType)
modelParams,modelList = config_obj.getEionLearnerModelParams(modelType)
if(modelType == 'regression'):
allowedmatrix = ['mse','r2','rmse','mae']
if(scoreParam.lower() not in allowedmatrix):
scoreParam = 'mse'
if(modelType == 'classification'):
allowedmatrix = ['accuracy','recall','f1_score','precision','roc_auc']
if(scoreParam.lower() not in allowedmatrix):
scoreParam = 'accuracy'
scoreParam = scoreParam.lower()
from incremental.incMachineLearning import incMachineLearning
incMlObj = incMachineLearning(mlobj)
self.configDict['riverModel'] = False
status,model_type,model,saved_model,matrix,trainmatrix,featureDataShape,model_tried,score,filename,self.features,threshold,pscore,rscore,self.method,loaded_model,xtrain1,ytrain1,xtest1,ytest1,topics,params=incMlObj.startLearning(learnerJson,modelType,modelParams,modelList,scoreParam,self.features,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,self.targetType,deployLocation,iterName,iterVersion,trained_data_file,predicted_data_file,labelMaps)
if model in self.riverAlgoNames:
self.configDict['riverModel'] = True
if(self.matrix != '{'):
self.matrix += ','
if(self.trainmatrix != '{'):
self.trainmatrix += ','
self.trainmatrix += trainmatrix
self.matrix += matrix
mlexecutionTime=time.time() - mlstart
log.info('-------> Total ML Execution Time '+str(mlexecutionTime))
log.info('Status:-|... AION training completed')
log.info('================== ML Completed ==================\\n')
if visualizationstatus:
visualizationJson = config_obj.getEionVisualizationConfiguration()
log.info('Status:-|... AION Visualizer started')
visualizer_mlstart = time.time()
from visualization.visualization import Visualization
                visualizationObj = Visualization(iterName,iterVersion,dataFrame,visualizationJson,datetimeFeature,deployLocation,dataFolderLocation,numericContinuousFeatures,discreteFeatures,categoricalFeatures,self.features,targetFeature,model_type,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,labelMaps,self.vectorizerFeatures,self.textFeatures,self.numericalFeatures,self.nonNumericFeatures,self.emptyFeatures,self.dfNumRows,self.dfNumCols,saved_model,scoreParam,learner_type,model,featureReduction,reduction_data_file)
visualizationObj.visualizationrecommandsystem()
visualizer_mlexecutionTime=time.time() - visualizer_mlstart
log.info('-------> COMPUTING: Total Visualizer Execution Time '+str(visualizer_mlexecutionTime))
log.info('Status:-|... AION Visualizer completed')
try:
os.remove(os.path.join(deployLocation,'aion_xai.py'))
except:
pass
if deployStatus:
if str(model) != 'None':
log.info('\\n================== Deployment Started ==================')
log.info('Status:-|... AION Deployer started')
deployPath = deployLocation
deployer_mlstart = time.time()
src = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','utilities','useCaseFiles')
shutil.copy2(os.path.join(src,'incBatchLearning.py'),deployPath)
os.rename(os.path.join(deployPath,'incBatchLearning.py'),os.path.join(deployPath,'aion_inclearning.py'))
shutil.copy2(os.path.join(src,'incBatchPrediction.py'),deployPath)
os.rename(os.path.join(deployPath,'incBatchPrediction.py'),os.path.join(deployPath,'aion_predict.py'))
self.configDict['modelName'] = str(model)
self.configDict['modelParams'] = params
self.configDict['problemType'] = problemType.lower()
self.configDict['score'] = score
self.configDict['metricList'] = []
self.configDict['metricList'].append(score)
self.configDict['trainRowsList'] = []
self.configDict['trainRowsList'].append(featureDataShape[0])
self.configDict['scoreParam'] = scoreParam
self.configDict['partialFit'] = 0
with open(os.path.join(deployLocation,'production', 'Config.json'), 'w', encoding='utf8') as f:
json.dump(self.configDict, f, ensure_ascii=False)
deployer_mlexecutionTime=time.time() - deployer_mlstart
log.info('-------> COMPUTING: Total Deployer Execution Time '+str(deployer_mlexecutionTime))
log.info('Status:-|... AION Batch Deployment completed')
log.info('================== Deployment Completed ==================')
# self.features = profilerObj.set_features(self.features,self.textFeatures,self.vectorizerFeatures)
self.matrix += '}'
self.trainmatrix += '}'
matrix = eval(self.matrix)
trainmatrix = eval(self.trainmatrix)
model_tried = eval('['+model_tried+']')
try:
json.dumps(params)
output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"trainmatrix":trainmatrix,"featuresused":str(self.features),"targetFeature":str(targetColumn),"params":params,"EvaluatedModels":model_tried,"LogFile":logFileName}}
except:
output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"trainmatrix":trainmatrix,"featuresused":str(self.features),"targetFeature":str(targetColumn),"params":"","EvaluatedModels":model_tried,"LogFile":logFileName}}
print(output_json)
if bool(topics) == True:
output_json['topics'] = topics
with open(outputjsonFile, 'w') as f:
json.dump(output_json, f)
output_json = json.dumps(output_json)
log.info('\\n------------- Summary ------------')
log.info('------->No of rows & columns in data:('+str(self.dfNumRows)+','+str(self.dfNumCols)+')')
log.info('------->No of missing Features :'+str(len(self.mFeatures)))
log.info('------->Missing Features:'+str(self.mFeatures))
log.info('------->Text Features:'+str(self.textFeatures))
log.info('------->No of Nonnumeric Features :'+str(len(self.nonNumericFeatures)))
log.info('------->Non-Numeric Features :' +str(self.nonNumericFeatures))
if threshold == -1:
log.info('------->Threshold: NA')
else:
log.info('------->Threshold: '+str(threshold))
log.info('------->Label Maps of Target Feature for classification :'+str(labelMaps))
if((learner_type != 'TS') & (learner_type != 'AR')):
log.info('------->No of columns and rows used for Modeling :'+str(featureDataShape))
log.info('------->Features Used for Modeling:'+str(self.features))
log.info('------->Target Feature: '+str(targetColumn))
log.info('------->Best Model Score :'+str(score))
log.info('------->Best Parameters:'+str(params))
log.info('------->Type of Model :'+str(modelType))
log.info('------->Best Model :'+str(model))
log.info('------------- Summary ------------\\n')
except Exception as inst:
log.info('server code execution failed !....'+str(inst))
output_json = {"status":"FAIL","message":str(inst).strip('"')}
output_json = json.dumps(output_json)
exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
executionTime = timeit.default_timer() - startTime
log.info('\\nTotal execution time(sec) :'+str(executionTime))
log.info('\\n------------- Output JSON ------------')
log.info('-------> Output :'+str(output_json))
log.info('------------- Output JSON ------------\\n')
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
hdlr.close()
log.removeHandler(hdlr)
return output_json
def aion_ot_train_model(arg):
warnings.filterwarnings('ignore')
try:
valid, msg = pushRecordForOnlineTraining()
if valid:
serverObj = server()
configObj = OTAionConfigManager()
jsonPath = arg
readConfistatus,msg = configObj.readConfigurationFile(jsonPath)
if(readConfistatus == False):
output = {"status":"FAIL","message":str(msg).strip('"')}
output = json.dumps(output)
print("\\n")
print("aion_learner_status:",output)
print("\\n")
return output
output = serverObj.startScriptExecution(configObj)
else:
output = {"status":"LicenseVerificationFailed","message":str(msg).strip('"')}
output = json.dumps(output)
print("\\n")
print("aion_learner_status:",output)
print("\\n")
return output
except Exception as inst:
output = {"status":"FAIL","message":str(inst).strip('"')}
output = json.dumps(output)
exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
print("\\n")
print("aion_learner_status:",output)
print("\\n")
return output
if __name__ == "__main__":
aion_ot_train_model(sys.argv[1])
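# Illustrative invocation (path is a placeholder): this module's entry point takes the path of an
# AION online-training configuration JSON as its single command line argument, e.g.
#   python <this_module>.py /home/user/aion/config/online_training_config.json
# and prints an "aion_learner_status" JSON carrying the status, best model and deploy location.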
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import joblib
import time
from pandas import json_normalize
import pandas as pd
import numpy as np
import argparse
import json
import os
import pathlib
from pathlib import Path
from sagemaker.aionMlopsService import aionMlopsService
import logging
import os.path
from os.path import expanduser
import platform,sys
from pathlib import Path
from sklearn.model_selection import train_test_split
def getAWSConfiguration(mlops_params,log):
awsId=mlops_params['awsSagemaker']['awsID']
if ((not awsId) or (awsId is None)):
awsId=""
log.info('awsId error. ')
awsAccesskeyid=mlops_params['awsSagemaker']['accesskeyID']
if ((not awsAccesskeyid) or (awsAccesskeyid is None)):
awsAccesskeyid=""
log.info('awsAccesskeyid error. ')
awsSecretaccesskey=mlops_params['awsSagemaker']['secretAccesskey']
if ((not awsSecretaccesskey) or (awsSecretaccesskey is None)):
awsSecretaccesskey=""
log.info('awsSecretaccesskey error. ')
awsSessiontoken=mlops_params['awsSagemaker']['sessionToken']
if ((not awsSessiontoken) or (awsSessiontoken is None)):
awsSessiontoken=""
log.info('awsSessiontoken error. ')
awsRegion=mlops_params['awsSagemaker']['region']
if ((not awsRegion) or (awsRegion is None)):
awsRegion=""
log.info('awsRegion error. ')
IAMSagemakerRoleArn=mlops_params['awsSagemaker']['IAMSagemakerRoleArn']
if ((not IAMSagemakerRoleArn) or (IAMSagemakerRoleArn is None)):
IAMSagemakerRoleArn=""
log.info('IAMSagemakerRoleArn error. ')
return awsId,awsAccesskeyid,awsSecretaccesskey,awsSessiontoken,awsRegion,IAMSagemakerRoleArn
def getMlflowParams(mlops_params,log):
modelInput = mlops_params['modelInput']
data = mlops_params['data']
mlflowtosagemakerDeploy=mlops_params['sagemakerDeploy']
if ((not mlflowtosagemakerDeploy) or (mlflowtosagemakerDeploy is None)):
mlflowtosagemakerDeploy="True"
mlflowtosagemakerPushOnly=mlops_params['deployExistingModel']['status']
if ((not mlflowtosagemakerPushOnly) or (mlflowtosagemakerPushOnly is None)):
mlflowtosagemakerPushOnly="False"
mlflowtosagemakerPushImageName=mlops_params['deployExistingModel']['dockerImageName']
if ((not mlflowtosagemakerPushImageName) or (mlflowtosagemakerPushImageName is None)):
mlflowtosagemakerPushImageName="mlops_image"
mlflowtosagemakerdeployModeluri=mlops_params['deployExistingModel']['deployModeluri']
if ((not mlflowtosagemakerdeployModeluri) or (mlflowtosagemakerdeployModeluri is None)):
mlflowtosagemakerdeployModeluri="None"
log.info('mlflowtosagemakerdeployModeluri error. ')
cloudInfrastructure = mlops_params['modelOutput']['cloudInfrastructure']
if ((not cloudInfrastructure) or (cloudInfrastructure is None)):
cloudInfrastructure="Sagemaker"
endpointName=mlops_params['endpointName']
if ((not endpointName) or (endpointName is None)):
        endpointName="aion-demo-app"
log.info('endpointName not given, setting default one. ')
experimentName=str(endpointName)
mlflowContainerName=str(endpointName)
return modelInput,data,mlflowtosagemakerDeploy,mlflowtosagemakerPushOnly,mlflowtosagemakerPushImageName,mlflowtosagemakerdeployModeluri,cloudInfrastructure,endpointName,experimentName,mlflowContainerName
def getPredictionParams(mlops_params,log):
predictStatus=mlops_params['prediction']['status']
if ((not predictStatus) or (predictStatus is None)):
predictStatus="False"
modelInput = mlops_params['modelInput']
data = mlops_params['data']
if (predictStatus == "True" or predictStatus.lower()== "true"):
        if ((not modelInput) or (modelInput is None)):
            log.info('prediction model input error. Please check the given model file or its path for prediction.')
        if ((not data) or (data is None)):
            log.info('prediction data input error. Please check the given data file or its path for prediction.')
targetFeature=mlops_params['prediction']['target']
return predictStatus,targetFeature
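# Hedged illustration (all values are placeholders): minimal shape of the mlops_params dict that
# getMlflowParams(), getAWSConfiguration() and getPredictionParams() above read from. The keys
# mirror the lookups in those functions; real values come from the AION configuration JSON.
def _example_mlops_params():
    return {
        'modelInput': '/path/to/model.pkl',
        'data': {'feature1': 1.0, 'feature2': 'A'},
        'sagemakerDeploy': 'True',
        'deployExistingModel': {'status': 'False', 'dockerImageName': 'mlops_image', 'deployModeluri': 'None'},
        'modelOutput': {'cloudInfrastructure': 'Sagemaker'},
        'endpointName': 'aion-demo-app',
        'prediction': {'status': 'False', 'target': ''},
        'awsSagemaker': {'awsID': '', 'accesskeyID': '', 'secretAccesskey': '', 'sessionToken': '',
                         'region': 'ap-south-1', 'IAMSagemakerRoleArn': ''}
    }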
def sagemakerPrediction(mlopsobj,data,log):
df = json_normalize(data)
model=None
predictionStatus=False
try:
endpointPrediction=mlopsobj.predict_sm_app_endpoint(df)
if (endpointPrediction is None):
log.info('Sagemaker endpoint application prediction Issue.')
outputjson = {"status":"Error","msg":"Sagemaker endpoint application prediction Issue"}
outputjson = json.dumps(outputjson)
#print("predictions: "+str(outputjson))
predictionStatus=False
else:
log.info("sagemaker end point Prediction: \\n"+str(endpointPrediction))
            df['prediction'] = endpointPrediction
outputjson = df.to_json(orient='records')
outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}
outputjson = json.dumps(outputjson)
#print("predictions: "+str(outputjson))
predictionStatus=True
except Exception as e:
#log.info("sagemaker end point Prediction error: \\n")
outputjson = {"status":"Error","msg":str(e)}
outputjson=None
predictionStatus=False
return outputjson,predictionStatus
## Main aion sagemaker fn call
def sagemaker_exec(mlops_params,log):
#mlops_params = json.loads(config)
modelInput,data,mlflowtosagemakerDeploy,mlflowtosagemakerPushOnly,mlflowtosagemakerPushImageName,mlflowtosagemakerdeployModeluri,cloudInfrastructure,endpointName,experimentName,mlflowContainerName = getMlflowParams(mlops_params,log)
mlflowModelname=None
awsId,awsAccesskeyid,awsSecretaccesskey,awsSessiontoken,awsRegion,IAMSagemakerRoleArn = getAWSConfiguration(mlops_params,log)
predictStatus,targetFeature = getPredictionParams(mlops_params,log)
sagemakerDeployOption='create'
deleteAwsecrRepository='False'
sagemakerAppName=str(endpointName)
ecrRepositoryName='aion-ecr-repo'
#aws ecr model app_name should contain only [[a-zA-Z0-9-]], again rechecking here.
import re
if sagemakerAppName:
pattern = re.compile("[A-Za-z0-9-]+")
# if found match (entire string matches pattern)
if pattern.fullmatch(sagemakerAppName) is not None:
#print("Found match: ")
pass
else:
            log.info('Invalid sagemaker application name; the name should contain only [A-Za-z0-9-]. Falling back to the default.')
            sagemakerAppName = 'aion-demo-app'
    else:
        sagemakerAppName = 'aion-demo-app'
    #The following 3 AWS parameter values are currently hard coded because they are not yet used. If AION starts using these options, make sure to read the values from the GUI.
sagemakerDeployOption="create"
deleteAwsecrRepository="False"
ecrRepositoryName="aion_test_repo"
log.info('mlops parameter check done.')
# predictionStatus=False
deploystatus = 'SUCCESS'
try:
log.info('cloudInfrastructure: '+str(cloudInfrastructure))
if(cloudInfrastructure.lower() == "sagemaker"):
## sagemaker app prediction call
if (predictStatus.lower() == "true"):
# df = json_normalize(data)
model=None
mlopsobj = aionMlopsService(model,mlflowtosagemakerDeploy,mlflowtosagemakerPushOnly,mlflowtosagemakerPushImageName,mlflowtosagemakerdeployModeluri,experimentName,mlflowModelname,awsAccesskeyid,awsSecretaccesskey,awsSessiontoken,mlflowContainerName,awsRegion,awsId,IAMSagemakerRoleArn,sagemakerAppName,sagemakerDeployOption,deleteAwsecrRepository,ecrRepositoryName)
outputjson,predictionStatus = sagemakerPrediction(mlopsobj,data,log)
print("predictions: "+str(outputjson))
return(outputjson)
else:
if Path(modelInput).is_file():
msg = ''
model = joblib.load(modelInput)
ProblemName = model.__class__.__name__
mlflowModelname=str(ProblemName)
log.info('aion mlops Model name: '+str(mlflowModelname))
df=None
mlopsobj = aionMlopsService(model,mlflowtosagemakerDeploy,mlflowtosagemakerPushOnly,mlflowtosagemakerPushImageName,mlflowtosagemakerdeployModeluri,experimentName,mlflowModelname,awsAccesskeyid,awsSecretaccesskey,awsSessiontoken,mlflowContainerName,awsRegion,awsId,IAMSagemakerRoleArn,sagemakerAppName,sagemakerDeployOption,deleteAwsecrRepository,ecrRepositoryName)
mlflow2sm_status,localhost_container_status=mlopsobj.mlflow2sagemaker_deploy()
log.info('mlflow2sm_status: '+str(mlflow2sm_status))
log.info('localhost_container_status: '+str(localhost_container_status))
# Checking deploy status
if (mlflowtosagemakerPushOnly.lower() == "true" ):
if (mlflow2sm_status.lower() == "success"):
deploystatus = 'SUCCESS'
                            msg = 'Endpoint successfully deployed in sagemaker'
                            log.info('Endpoint successfully deployed in sagemaker (Push existing model container).\\n ')
elif(mlflow2sm_status.lower() == "failed"):
deploystatus = 'ERROR'
msg = 'Endpoint failed to deploy in sagemaker'
                            log.info('Endpoint failed to deploy in sagemaker. (Push existing model container).\\n ')
else:
pass
elif(mlflowtosagemakerDeploy.lower() == "true"):
if (mlflow2sm_status.lower() == "success"):
deploystatus='SUCCESS'
                            msg = 'Endpoint successfully deployed in sagemaker'
                            log.info('Endpoint successfully deployed in sagemaker')
elif(mlflow2sm_status.lower() == "failed"):
deploystatus = 'ERROR'
msg = 'Endpoint failed to deploy in sagemaker'
log.info('Endpoint failed to deploy in sagemaker.\\n ')
                        elif (mlflow2sm_status.lower() == "notdeployed"):
deploystatus= 'ERROR'
msg = 'Sagemaker compatible container created'
log.info('sagemaker endpoint not deployed, check aws connection and credentials. \\n')
elif (mlflowtosagemakerDeploy.lower() == "false"):
if(localhost_container_status.lower() == "success"):
deploystatus = 'SUCCESS'
msg = 'Localhost mlops docker created successfully'
log.info('Localhost mlops docker created successfully. \\n')
elif(localhost_container_status.lower() == "failed"):
deploystatus = 'ERROR'
msg = 'Localhost mlops docker created failed'
log.info('Localhost mlops docker creation failed. \\n')
                        elif (localhost_container_status.lower() == "notdeployed"):
deploystatus= 'ERROR'
log.info('Localhost mlops docker not deployed, check local docker status. \\n')
else:
pass
else:
pass
else:
deploystatus = 'ERROR'
msg = 'Model Path not Found'
print('Error: Model Path not Found')
outputjson = {"status":str(deploystatus),"data":str(msg)}
outputjson = json.dumps(outputjson)
print("predictions: "+str(outputjson))
return(outputjson)
except Exception as inst:
outputjson = {"status":str(deploystatus),"data":str(msg)}
outputjson = json.dumps(outputjson)
print("predictions: "+str(outputjson))
return(outputjson)
def aion_sagemaker(config):
try:
mlops_params = config
print(mlops_params)
from appbe.dataPath import LOG_LOCATION
sagemakerLogLocation = LOG_LOCATION
try:
os.makedirs(sagemakerLogLocation)
except OSError as e:
if (os.path.exists(sagemakerLogLocation)):
pass
else:
raise OSError('sagemakerLogLocation error.')
filename_mlops = 'mlopslog_'+str(int(time.time()))
filename_mlops=filename_mlops+'.log'
filepath = os.path.join(sagemakerLogLocation, filename_mlops)
logging.basicConfig(filename=filepath, format='%(message)s',filemode='w')
log = logging.getLogger('aionMLOps')
log.setLevel(logging.DEBUG)
output = sagemaker_exec(mlops_params,log)
return output
except Exception as inst:
print(inst)
deploystatus = 'ERROR'
output = {"status":str(deploystatus),"data":str(inst)}
output = json.dumps(output)
print("predictions: "+str(output))
return(output)
#Sagemaker main fn call
if __name__=='__main__':
json_config = str(sys.argv[1])
output = aion_sagemaker(json.loads(json_config))
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import requests
import json
import os
from datetime import datetime
import socket
import getmac
def telemetry_data(operation,Usecase,data):
now = datetime.now()
ID = datetime.timestamp(now)
record_date = now.strftime("%y-%m-%d %H:%M:%S")
try:
user = os.getlogin()
except:
user = 'NA'
computername = socket.getfqdn()
macaddress = getmac.get_mac_address()
item = {}
item['ID'] = str(int(ID))
item['record_date'] = record_date
item['UseCase'] = Usecase
item['user'] = str(user)
item['operation'] = operation
item['remarks'] = data
item['hostname'] = computername
item['macaddress'] = macaddress
url = 'https://l5m119j6v9.execute-api.ap-south-1.amazonaws.com/default/aion_telemetry'
record = {}
record['TableName'] = 'AION_OPERATION'
record['Item'] = item
record = json.dumps(record)
try:
response = requests.post(url, data=record,headers={"x-api-key":"Obzt8ijfOT3dgBYma9JCt1tE3W6tzHaV8rVuQdMK","Content-Type":"application/json",})
check_telemetry_file()
except Exception as inst:
filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),'telemetry.txt')
f=open(filename, "a+")
f.write(record+'\\n')
f.close()
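# Hedged usage sketch: telemetry_data() posts a single operation record to the AION telemetry
# endpoint and, if the POST fails, buffers the record in a local telemetry.txt file which
# check_telemetry_file() later retries. The operation/usecase/remarks values below are placeholders.
def _telemetry_usage_example():
    telemetry_data('Training', 'usecase_1', 'training completed')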
def check_telemetry_file():
file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'telemetry.txt')
if(os.path.isfile(file_path)):
f = open(file_path, 'r')
file_content = f.read()
f.close()
matched_lines = file_content.split('\\n')
write_lines = []
url = 'https://l5m119j6v9.execute-api.ap-south-1.amazonaws.com/default/aion_telemetry'
for record in matched_lines:
try:
response = requests.post(url, data=record,headers={"x-api-key":"Obzt8ijfOT3dgBYma9JCt1tE3W6tzHaV8rVuQdMK","Content-Type":"application/json",})
except:
write_lines.append(record)
f = open(file_path, "a")
f.seek(0)
f.truncate()
for record in write_lines:
f.write(record+'\\n')
f.close()
return True
else:
return True<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import sys
import json
import datetime,time,timeit
import itertools
#Sci-Tools imports
import numpy as np
import pandas as pd
import math
from statsmodels.tsa.stattools import adfuller
from scipy.stats.stats import pearsonr
from numpy import cumsum, log, polyfit, sqrt, std, subtract
from numpy.random import randn
from sklearn.metrics import normalized_mutual_info_score
from sklearn.feature_selection import mutual_info_regression
import logging
#SDP1 class import
from feature_engineering.featureImportance import featureImp
from feature_engineering.featureReducer import featureReducer
from sklearn.linear_model import Lasso, LogisticRegression
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.decomposition import PCA
from sklearn.decomposition import TruncatedSVD
from sklearn.decomposition import FactorAnalysis
from sklearn.decomposition import FastICA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_selection import RFE
def ranking(ranks, names, order=1):
minmax = MinMaxScaler()
ranks = minmax.fit_transform(order*np.array([ranks]).T).T[0]
ranks = map(lambda x: round(x,2), ranks)
return dict(zip(names, ranks))
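# Hedged example: ranking() rescales raw RFE ranks to [0,1] per feature (order=-1 flips the scale
# so the best-ranked feature gets 1.0). The feature names and ranks below are illustrative only.
def _ranking_example():
    names = ['f1', 'f2', 'f3']
    rfe_ranks = [1, 3, 2]              # 1 = most important, as produced by sklearn RFE
    scores = ranking(rfe_ranks, names, order=-1)
    # scores -> {'f1': 1.0, 'f2': 0.0, 'f3': 0.5}; features above a chosen threshold are kept
    return scores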
# noinspection PyPep8Naming
class featureSelector():
def __init__(self):
self.pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
self.log = logging.getLogger('eion')
def startSelector(self,df,conf_json,textFeatures,targetFeature,problem_type):
try:
categoricalMaxLabel = int(conf_json['categoryMaxLabel'])
pca='None'
pcaReducerStatus = conf_json['featureEngineering']['PCA']
svdReducerStatus = conf_json['featureEngineering']['SVD']
factorReducerStatus = conf_json['featureEngineering']['FactorAnalysis']
icaReducerStatus = conf_json['featureEngineering']['ICA']
nfeatures=float(conf_json['featureEngineering']['numberofComponents'])
            statisticalConfig = conf_json['statisticalConfig']
corrThresholdInput = float(statisticalConfig.get('correlationThresholdFeatures',0.50))
corrThresholdTarget = float(statisticalConfig.get('correlationThresholdTarget',0.85))
pValThresholdInput = float(statisticalConfig.get('pValueThresholdFeatures',0.05))
pValThresholdTarget = float(statisticalConfig.get('pValueThresholdTarget',0.04))
varThreshold = float(statisticalConfig.get('varianceThreshold',0.01))
allFeaturesSelector = conf_json['featureSelection']['allFeatures']
correlationSelector = conf_json['featureSelection']['statisticalBased']
modelSelector = conf_json['featureSelection']['modelBased']
featureSelectionMethod = conf_json['selectionMethod']['featureSelection']
featureEngineeringSelector = conf_json['selectionMethod']['featureEngineering']
if featureSelectionMethod == 'True':
featureEngineeringSelector = 'False'
            # if feature engineering is true then we check whether PCA or SVD is true. By default we will run PCA
            if featureEngineeringSelector == 'True':
                if pcaReducerStatus == 'True':
                    svdReducerStatus = 'False'
                    factorReducerStatus = 'False'
                    icaReducerStatus = 'False'
                elif svdReducerStatus == 'True':
                    pcaReducerStatus = 'False'
                    factorReducerStatus = 'False'
                    icaReducerStatus = 'False'
                elif factorReducerStatus == 'True':
                    pcaReducerStatus = 'False'
                    svdReducerStatus = 'False'
                    icaReducerStatus = 'False'
                elif icaReducerStatus == 'True':
                    pcaReducerStatus = 'False'
                    svdReducerStatus = 'False'
                    factorReducerStatus = 'False'
                else:
                    pcaReducerStatus = 'True'
if featureSelectionMethod == 'False' and featureEngineeringSelector == 'False':
featureSelectionMethod = 'True'
if featureSelectionMethod == 'True':
if modelSelector == 'False' and correlationSelector == 'False' and allFeaturesSelector == 'False':
modelSelector = 'True'
reductionMethod = 'na'
bpca_features = []
#nfeatures = 0
if 'maxClasses' in conf_json:
                maxClasses = int(conf_json['maxClasses'])
else:
maxClasses = 20
target = targetFeature
self.log.info('-------> Feature: '+str(target))
dataFrame = df
pThresholdInput=pValThresholdInput
pThresholdTarget=pValThresholdTarget
cThresholdInput=corrThresholdInput
cThresholdTarget=corrThresholdTarget
numericDiscreteFeatures=[]
similarGruops=[]
numericContinuousFeatures=[]
categoricalFeatures=[]
nonNumericFeatures=[]
apca_features = []
dTypesDic={}
dataColumns = list(dataFrame.columns)
features_list = list(dataFrame.columns)
modelselectedFeatures=[]
topFeatures=[]
allFeatures=[]
targetType=""
# just to make sure feature engineering is false
#print(svdReducerStatus)
if featureEngineeringSelector.lower() == 'false' and correlationSelector.lower() == "true" and len(textFeatures) <= 0:
reducerObj=featureReducer()
self.log.info(featureReducer.__doc__)
self.log.info('Status:- |... Feature reduction started')
updatedNumericFeatures,updatedFeatures,similarGruops=reducerObj.startReducer(dataFrame,dataColumns,target,varThreshold)
if len(updatedFeatures) <= 1:
self.log.info('=======================================================')
self.log.info('Most of the features are of low variance. Use Model based feature engineering for better result')
self.log.info('=======================================================')
raise Exception('Most of the features are of low variance. Use Model based feature engineering for better result')
dataFrame=dataFrame[updatedFeatures]
dataColumns=list(dataFrame.columns)
self.log.info('Status:- |... Feature reduction completed')
elif (pcaReducerStatus.lower() == "true" or svdReducerStatus.lower() == 'true' or factorReducerStatus.lower() == 'true' or icaReducerStatus.lower()=='true') and featureEngineeringSelector.lower() == 'true':
# check is PCA or SVD is true
pcaColumns=[]
#print(svdReducerStatus.lower())
if target != "":
dataColumns.remove(target)
targetArray=df[target].values
targetArray.shape = (len(targetArray), 1)
if pcaReducerStatus.lower() == "true":
if nfeatures == 0:
pca = PCA(n_components='mle',svd_solver = 'full')
elif nfeatures < 1:
pca = PCA(n_components=nfeatures,svd_solver = 'full')
else:
pca = PCA(n_components=int(nfeatures))
pca.fit(df[dataColumns])
bpca_features = dataColumns.copy()
pcaArray=pca.transform(df[dataColumns])
method = 'PCA'
elif svdReducerStatus.lower() == 'true':
if nfeatures < 2:
nfeatures = 2
pca = TruncatedSVD(n_components=int(nfeatures), n_iter=7, random_state=42)
pca.fit(df[dataColumns])
bpca_features = dataColumns.copy()
pcaArray=pca.transform(df[dataColumns])
method = 'SVD'
elif factorReducerStatus.lower()=='true':
if int(nfeatures) == 0:
pca=FactorAnalysis()
else:
pca=FactorAnalysis(n_components=int(nfeatures))
pca.fit(df[dataColumns])
bpca_features = dataColumns.copy()
pcaArray=pca.transform(df[dataColumns])
method = 'FactorAnalysis'
elif icaReducerStatus.lower()=='true':
if int(nfeatures) == 0:
pca=FastICA()
else:
pca=FastICA(n_components=int(nfeatures))
pca.fit(df[dataColumns])
bpca_features = dataColumns.copy()
pcaArray=pca.transform(df[dataColumns])
method = 'IndependentComponentAnalysis'
pcaDF=pd.DataFrame(pcaArray)
#print(pcaDF)
for i in range(len(pcaDF.columns)):
pcaColumns.append(method+str(i))
topFeatures=pcaColumns
apca_features= pcaColumns.copy()
if target != '':
pcaColumns.append(target)
scaledDf = pd.DataFrame(np.hstack((pcaArray, targetArray)),columns=pcaColumns)
else:
scaledDf = pd.DataFrame(pcaArray,columns=pcaColumns)
self.log.info("<--- dataframe after dimensionality reduction using "+method)
self.log.info(scaledDf.head())
dataFrame=scaledDf
dataColumns=list(dataFrame.columns)
self.log.info('Status:- |... Feature reduction started')
self.log.info('Status:- |... '+method+' done')
self.log.info('Status:- |... Feature reduction completed')
self.numofCols = dataFrame.shape[1]
self.numOfRows = dataFrame.shape[0]
dataFDtypes=[]
for i in dataColumns:
dataType=dataFrame[i].dtypes
dataFDtypes.append(tuple([i,str(dataType)]))
#Categoring datatypes
for item in dataFDtypes:
dTypesDic[item[0]] = item[1]
if item[0] != target:
if item[1] in ['int16', 'int32', 'int64'] :
numericDiscreteFeatures.append(item[0])
elif item[1] in ['float16', 'float32', 'float64']:
numericContinuousFeatures.append(item[0])
else:
nonNumericFeatures.append(item[0])
self.numOfRows = dataFrame.shape[0]
'''
cFRatio = 0.01
if(self.numOfRows < 1000):
cFRatio = 0.2
elif(self.numOfRows < 10000):
cFRatio = 0.1
elif(self.numOfRows < 100000):
cFRatio = 0.01
'''
for i in numericDiscreteFeatures:
nUnique=len(dataFrame[i].unique().tolist())
nRows=self.numOfRows
if nUnique <= categoricalMaxLabel:
categoricalFeatures.append(i)
for i in numericContinuousFeatures:
nUnique=len(dataFrame[i].unique().tolist())
nRows=self.numOfRows
if nUnique <= categoricalMaxLabel:
categoricalFeatures.append(i)
discreteFeatures=list(set(numericDiscreteFeatures)-set(categoricalFeatures))
numericContinuousFeatures=list(set(numericContinuousFeatures)-set(categoricalFeatures))
self.log.info('-------> Numerical continuous features :'+(str(numericContinuousFeatures))[:500])
self.log.info('-------> Numerical discrete features :'+(str(discreteFeatures))[:500])
self.log.info('-------> Non numerical features :'+(str(nonNumericFeatures))[:500])
self.log.info('-------> Categorical Features :'+(str(categoricalFeatures))[:500])
if target !="" and featureEngineeringSelector.lower() == "false" and correlationSelector.lower() == "true":
self.log.info('\\n------- Feature Based Correlation Analysis Start ------')
start = time.time()
featureImpObj = featureImp()
topFeatures,targetType= featureImpObj.FFImpNew(dataFrame,numericContinuousFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,dTypesDic,target,pThresholdInput,pThresholdTarget,cThresholdInput,cThresholdTarget,categoricalMaxLabel,problem_type,maxClasses)
#topFeatures,targetType= featureImpObj.FFImp(dataFrame,numericContinuousFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,dTypesDic,target,pThreshold,cThreshold,categoricalMaxLabel,problem_type,maxClasses)
self.log.info('-------> Highly Correlated Features Using Correlation Techniques'+(str(topFeatures))[:500])
executionTime=time.time() - start
self.log.info('-------> Time Taken: '+str(executionTime))
self.log.info('Status:- |... Correlation based feature selection done: '+str(len(topFeatures))+' out of '+str(len(dataColumns))+' selected')
self.log.info('------- Feature Based Correlation Analysis End ------>\\n')
if targetType == '':
if problem_type.lower() == 'classification':
targetType = 'categorical'
if problem_type.lower() == 'regression':
targetType = 'continuous'
if target !="" and featureEngineeringSelector.lower() == "false" and modelSelector.lower() == "true":
self.log.info('\\n------- Model Based Correlation Analysis Start -------')
start = time.time()
updatedFeatures = dataColumns
updatedFeatures.remove(target)
#targetType = problem_type.lower()
modelselectedFeatures=[]
if targetType == 'categorical':
try:
xtrain=dataFrame[updatedFeatures]
ytrain=dataFrame[target]
etc = ExtraTreesClassifier(n_estimators=100)
etc.fit(xtrain, ytrain)
rfe = RFE(etc, n_features_to_select=1, verbose =0 )
rfe.fit(xtrain, ytrain)
# total list of features
ranks = {}
ranks["RFE_LR"] = ranking(list(map(float, rfe.ranking_)), dataColumns, order=-1)
for item in ranks["RFE_LR"]:
if ranks["RFE_LR"][item]>0.30: #threshold as 30%
modelselectedFeatures.append(item)
                        modelselectedFeatures = list(modelselectedFeatures)
self.log.info('-------> Highly Correlated Features Using Treeclassifier + RFE: '+(str(modelselectedFeatures))[:500])
except Exception as e:
self.log.info('---------------->'+str(e))
selector = SelectFromModel(ExtraTreesClassifier())
xtrain=dataFrame[updatedFeatures]
ytrain=dataFrame[target]
selector.fit(xtrain,ytrain)
modelselectedFeatures = xtrain.columns[(selector.get_support())].tolist()
self.log.info('-------> Highly Correlated Features Using Treeclassifier: '+(str(modelselectedFeatures))[:500])
else:
try:
xtrain=dataFrame[updatedFeatures]
ytrain=dataFrame[target]
ls = Lasso()
ls.fit(xtrain, ytrain)
rfe = RFE(ls, n_features_to_select=1, verbose = 0 )
rfe.fit(xtrain, ytrain)
# total list of features
ranks = {}
ranks["RFE_LR"] = ranking(list(map(float, rfe.ranking_)), dataColumns, order=-1)
for item in ranks["RFE_LR"]:
if ranks["RFE_LR"][item]>0.30: #threshold as 30%
modelselectedFeatures.append(item)
modelselectedFeatures = list(modelselectedFeatures)
self.log.info('-------> Highly Correlated Features Using LASSO + RFE: '+(str(modelselectedFeatures))[:500])
except Exception as e:
self.log.info('---------------->'+str(e))
selector = SelectFromModel(Lasso())
xtrain=dataFrame[updatedFeatures]
ytrain=dataFrame[target]
selector.fit(xtrain,ytrain)
modelselectedFeatures = xtrain.columns[(selector.get_support())].tolist()
self.log.info('-------> Highly Correlated Features Using LASSO: '+(str(modelselectedFeatures))[:500])
executionTime=time.time() - start
self.log.info('-------> Time Taken: '+str(executionTime))
self.log.info('Status:- |... Model based feature selection done: '+str(len(modelselectedFeatures))+' out of '+str(len(dataColumns))+' selected')
self.log.info('--------- Model Based Correlation Analysis End -----\\n')
if target !="" and featureEngineeringSelector.lower() == "false" and allFeaturesSelector.lower() == "true":
allFeatures = features_list
if target != '':
allFeatures.remove(target)
#print(allFeatures)
if len(topFeatures) == 0 and len(modelselectedFeatures) == 0 and len(allFeatures) == 0:
allFeatures = features_list
return dataFrame,target,topFeatures,modelselectedFeatures,allFeatures,targetType,similarGruops,numericContinuousFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,pca,bpca_features,apca_features,featureEngineeringSelector
except Exception as inst:
self.log.info('Feature selector failed: '+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
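# Hedged illustration (placeholder values): minimal configuration dict consumed by
# featureSelector.startSelector() above. The keys mirror the lookups in that method; real
# values come from the AION configuration JSON.
def _example_selector_config():
    return {
        'categoryMaxLabel': 20,
        'maxClasses': 20,
        'featureEngineering': {'PCA': 'False', 'SVD': 'False', 'FactorAnalysis': 'False',
                               'ICA': 'False', 'numberofComponents': 2},
        'statisticalConfig': {'correlationThresholdFeatures': 0.50, 'correlationThresholdTarget': 0.85,
                              'pValueThresholdFeatures': 0.05, 'pValueThresholdTarget': 0.04,
                              'varianceThreshold': 0.01},
        'featureSelection': {'allFeatures': 'False', 'statisticalBased': 'True', 'modelBased': 'False'},
        'selectionMethod': {'featureSelection': 'True', 'featureEngineering': 'False'}
    }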
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import sys
import json
import datetime,time,timeit
import itertools
#Sci-Tools imports
import numpy as np
import pandas as pd
import math
from statsmodels.tsa.stattools import adfuller
from scipy.stats.stats import pearsonr
from numpy import cumsum, log, polyfit, sqrt, std, subtract
from numpy.random import randn
#SDP1 class import
from feature_engineering.featureImportance import featureImp
from sklearn.feature_selection import VarianceThreshold
import logging
class featureReducer():
def __init__(self):
self.pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
self.log = logging.getLogger('eion')
def startReducer(self,df,data_columns,target,var_threshold):
self.log.info('\\n---------- Feature Reducer Start ----------')
dataframe = df
columns=data_columns
target = target
corrThreshold=1.0
categoricalFeatures=[]
nonNumericFeatures=[]
constFeatures=[]
qconstantColumns=[]
DtypesDic={}
numericFeatures=[]
nonNumericalFeatures=[]
similarFeatureGroups=[]
try:
dataFDtypes=self.dataFramecolType(dataframe)
for item in dataFDtypes:
DtypesDic[item[0]] = item[1]
if item[1] in self.pandasNumericDtypes:
numericFeatures.append(item[0])
else:
nonNumericFeatures.append(item[0])
#Checking for constant data features
for col in columns:
try:
distCount = len(dataframe[col].unique())
if(distCount == 1):
constFeatures.append(col)
except Exception as inst:
self.log.info('Unique Testing Fail for Col '+str(col))
numericalDataCols,nonNumericalDataCols = [],[]
#Removing constant data features
if(len(constFeatures) != 0):
self.log.info( '-------> Constant Features: '+str(constFeatures))
numericalDataCols = list(set(numericFeatures) - set(constFeatures))
nonNumericalDataCols = list(set(nonNumericFeatures) - set(constFeatures))
else:
numericalDataCols = list(set(numericFeatures))
nonNumericalDataCols = list(set(nonNumericFeatures))
if(len(numericalDataCols) > 1):
if var_threshold !=0:
qconstantFilter = VarianceThreshold(threshold=var_threshold)
tempDf=df[numericalDataCols]
qconstantFilter.fit(tempDf)
qconstantColumns = [column for column in numericalDataCols if column not in tempDf.columns[qconstantFilter.get_support()]]
if(len(qconstantColumns) != 0):
if target != '' and target in qconstantColumns:
qconstantColumns.remove(target)
self.log.info( '-------> Low Variant Features: '+str(qconstantColumns))
self.log.info('Status:- |... Low variance feature treatment done: '+str(len(qconstantColumns))+' low variance features found')
numericalDataCols = list(set(numericalDataCols) - set(qconstantColumns))
else:
self.log.info('Status:- |... Low variance feature treatment done: Found zero or 1 numeric feature')
#Minimum of two columns required for data integration
if(len(numericalDataCols) > 1):
numColPairs = list(itertools.product(numericalDataCols, numericalDataCols))
noDupList = []
for item in numColPairs:
if(item[0] != item[1]):
noDupList.append(item)
numColPairs = noDupList
tempArray = []
for item in numColPairs:
tempCorr = np.abs(dataframe[item[0]].corr(dataframe[item[1]]))
if(tempCorr > corrThreshold):
tempArray.append(item[0])
tempArray = np.unique(tempArray)
nonsimilarNumericalCols = list(set(numericalDataCols) - set(tempArray))
'''
Notes:
tempArray: List of all similar/equal data features
nonsimilarNumericalCols: List of all non-correlatable data features
'''
#Grouping similar/equal features
groupedFeatures = []
if(len(numericalDataCols) != len(nonsimilarNumericalCols)):
#self.log.info( '-------> Similar/Equal Features: Not Any')
#Correlation dictionary
corrDic = {}
for feature in tempArray:
temp = []
for col in tempArray:
tempCorr = np.abs(dataframe[feature].corr(dataframe[col]))
temp.append(tempCorr)
corrDic[feature] = temp
#Similar correlation dataframe
corrDF = pd.DataFrame(corrDic,index = tempArray)
corrDF.loc[:,:] = np.tril(corrDF, k=-1)
alreadyIn = set()
similarFeatures = []
for col in corrDF:
perfectCorr = corrDF[col][corrDF[col] > corrThreshold].index.tolist()
if perfectCorr and col not in alreadyIn:
alreadyIn.update(set(perfectCorr))
perfectCorr.append(col)
similarFeatures.append(perfectCorr)
                    self.log.info( '-------> No. of Similar/Equal Feature Groups: '+str(len(similarFeatures)))
for i in range(0,len(similarFeatures)):
similarFeatureGroups.append(similarFeatures[i])
#self.log.info((str(i+1)+' '+str(similarFeatures[i])))
self.log.info('-------> Similar/Equal Features: '+str(similarFeatureGroups))
self.log.info('-------> Non Similar Features :'+str(nonsimilarNumericalCols))
updatedSimFeatures = []
for items in similarFeatures:
if(target != '' and target in items):
for p in items:
updatedSimFeatures.append(p)
else:
updatedSimFeatures.append(items[0])
newTempFeatures = list(set(updatedSimFeatures + nonsimilarNumericalCols))
updatedNumFeatures = newTempFeatures
#self.log.info( '\\n <--- Merged similar/equal features into one ---> ')
updatedFeatures = list(set(newTempFeatures + nonNumericalDataCols))
self.log.info('Status:- |... Similar feature treatment done: '+str(len(similarFeatures))+' similar features found')
else:
updatedNumFeatures = numericalDataCols
updatedFeatures = list(set(columns) - set(constFeatures)-set(qconstantColumns))
self.log.info( '-------> Similar/Equal Features: Not Any')
self.log.info('Status:- |... Similar feature treatment done: No similar features found')
else:
updatedNumFeatures = numericalDataCols
updatedFeatures = list(set(columns) - set(constFeatures)-set(qconstantColumns))
self.log.info( '\\n-----> Need minimum of two numerical features for data integration.')
self.log.info('Status:- |... Similar feature treatment done: Found zero or 1 numeric feature')
self.log.info('---------- Feature Reducer End ----------\\n')
return updatedNumFeatures,updatedFeatures,similarFeatureGroups
except Exception as inst:
self.log.info("feature Reducer failed "+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
return [],[]
def dataFramecolType(self,dataFrame):
dataFDtypes=[]
try:
dataColumns=list(dataFrame.columns)
for i in dataColumns:
dataType=dataFrame[i].dtypes
dataFDtypes.append(tuple([i,str(dataType)]))
return dataFDtypes
except:
self.log.info("error in dataFramecolyType")
return dataFDtypes
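# Hedged usage sketch: startReducer() drops constant columns, filters low-variance numeric
# columns via VarianceThreshold, and groups near-duplicate features. The toy frame below is
# illustrative only; 'c' is constant and removed, the remaining numeric columns are screened
# for low variance and duplicated groups.
def _feature_reducer_example():
    df = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [10, 20, 30, 40], 'c': [7, 7, 7, 7], 'y': [0, 1, 0, 1]})
    reducer = featureReducer()
    num_feats, kept_feats, similar_groups = reducer.startReducer(df, list(df.columns), 'y', 0.01)
    return num_feats, kept_feats, similar_groups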
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> |
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
#System imports
import os
import sys
import json
import datetime,time,timeit
import itertools
#Sci-Tools imports
import numpy as np
import pandas as pd
import math
from sklearn.metrics import normalized_mutual_info_score
from sklearn.feature_selection import f_regression,mutual_info_regression
from sklearn.feature_selection import chi2,f_classif,mutual_info_classif
import scipy.stats
from scipy.stats import pearsonr, spearmanr, pointbiserialr, f_oneway, kendalltau, chi2_contingency
import statsmodels.api as sm
import statsmodels.formula.api as smf
import logging
def getHigherSignificanceColName(featureDict, colname1, colname2):
if featureDict[colname1]<featureDict[colname2]:
return colname2
else:
return colname1
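# Note (clarification added): despite its name, the helper above returns the column with the
# LARGER stored p-value (i.e. the statistically weaker of the two), which the callers below then
# add to the drop list. Illustrative call with placeholder values:
#   getHigherSignificanceColName({'age': 0.001, 'height': 0.2}, 'age', 'height')  # -> 'height'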
class featureImp():
def __init__(self):
self.dTypesDic = {}
self.featureImpDic={}
self.indexedDic = {}
self.log = logging.getLogger('eion')
def FFImpNew(self,df,contFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,dTypesDic,target,pValThInput,pValThTarget,corrThInput,corrThTarget,categoricalMaxLabel,problem_type,maxClasses):
try:
dataframe = df
contiFeatures= contFeatures
quantFeatures=discreteFeatures+contiFeatures
categoricalFeatures=categoricalFeatures
targetData=dataframe[target]
nUnique=len(targetData.unique().tolist())
if nUnique <= categoricalMaxLabel:
targetType="categorical"
else:
targetType="continuous"
if problem_type.lower() == 'classification' and targetType == 'continuous':
targetType = 'categorical'
                self.log.info( '-------> Change Target Type to Categorical as user defined')
if problem_type.lower() == 'regression' and targetType == 'categorical':
targetType = 'continuous'
self.log.info( '-------> Change Target Type to Continuous as user defined')
self.log.info( '-------> Target Type: '+str(targetType))
impFeatures=[]
catFeature = []
numFeature = []
catFeatureXYcat = []
numFeatureXYcat = []
catFeatureXYnum= []
numFeatureXYnum = []
dropFeatureCat= []
dropFeatureNum = []
featureDict = {}
if targetType =="categorical":
if len(categoricalFeatures) !=0:
# input vs target
# chi-square
for col in categoricalFeatures:
contingency = pd.crosstab(dataframe[col], targetData)
stat, p, dof, expected = chi2_contingency(contingency)
if p <= pValThTarget:
catFeatureXYcat.append(col) # categorical feature xy when target is cat
featureDict[col] = p
#input vs input
# chi_square
if len(catFeatureXYcat) != 0:
length = len(catFeatureXYcat)
for i in range(length):
for j in range(i+1, length):
contingency = pd.crosstab(dataframe[catFeatureXYcat[i]], dataframe[catFeatureXYcat[j]])
stat, p, dof, expected = chi2_contingency(contingency)
if p > pValThInput:
highSignificanceColName = getHigherSignificanceColName(featureDict, catFeatureXYcat[i], catFeatureXYcat[j])
dropFeatureCat.append(highSignificanceColName)
break
catFeature = list(set(catFeatureXYcat) - set(dropFeatureCat))
featureDict.clear()
dropFeatureCat.clear()
if len(quantFeatures) !=0:
# input vs target
# one way anova
for col in quantFeatures:
CategoryGroupLists = dataframe.groupby(target)[col].apply(list)
AnovaResults = f_oneway(*CategoryGroupLists)
if AnovaResults[1] <= pValThTarget:
numFeatureXYcat.append(col) #numeric feature xy when target is cat
featureDict[col] = AnovaResults[1]
#input vs input
# preason/spearman/ols # numeric feature xx when target is cat
if len(numFeatureXYcat) != 0:
df_xx = dataframe[numFeatureXYcat]
rows, cols = df_xx.shape
flds = list(df_xx.columns)
corr_pearson = df_xx.corr(method='pearson').values
corr_spearman = df_xx.corr(method='spearman').values
for i in range(cols):
for j in range(i+1, cols):
if corr_pearson[i,j] > -corrThInput and corr_pearson[i,j] < corrThInput:
if corr_spearman[i,j] > -corrThInput and corr_spearman[i,j] < corrThInput:
#f = "'"+flds[i]+"'"+' ~ '+"'"+flds[j]+"'"
#reg = smf.ols(formula=f, data=dataframe).fit()
tmpdf = pd.DataFrame({'x':dataframe[flds[j]], 'y':dataframe[flds[i]]})
reg = smf.ols('y~x', data=tmpdf).fit()
if len(reg.pvalues) > 1 and reg.pvalues[1] > pValThInput:
highSignificanceColName = getHigherSignificanceColName(featureDict, flds[i], flds[j])
dropFeatureNum.append(highSignificanceColName)
break
else:
highSignificanceColName = getHigherSignificanceColName(featureDict, flds[i], flds[j])
dropFeatureNum.append(highSignificanceColName)
break
else:
highSignificanceColName = getHigherSignificanceColName(featureDict, flds[i], flds[j])
dropFeatureNum.append(highSignificanceColName)
break
numFeature = list(set(numFeatureXYcat) - set(dropFeatureNum))
dropFeatureNum.clear()
featureDict.clear()
impFeatures = numFeature+catFeature
hCorrFeatures=list(set((impFeatures)))
else: # targetType =="continuous":
if len(categoricalFeatures) !=0:
# input vs target
# Anova
for col in categoricalFeatures:
#f = target+' ~ C('+col+')'
#model = smf.ols(f, data=dataframe).fit()
#table = sm.stats.anova_lm(model, typ=2)
tmpdf = pd.DataFrame({'x':dataframe[col], 'y':dataframe[target]})
model = smf.ols('y~x', data=tmpdf).fit()
table = sm.stats.anova_lm(model, typ=2)
if table['PR(>F)'][0] <= pValThTarget:
catFeatureXYnum.append(col) #categorical feature xy when target is numeric
featureDict[col]=table['PR(>F)'][0]
#input vs input
# chi_square
if len(catFeatureXYnum) != 0:
length = len(catFeatureXYnum)
for i in range(length):
for j in range(i+1, length):
contingency = pd.crosstab(dataframe[catFeatureXYnum[i]], dataframe[catFeatureXYnum[j]])
stat, p, dof, expected = chi2_contingency(contingency)
if p > pValThInput:
highSignificanceColName = getHigherSignificanceColName(featureDict, catFeatureXYnum[i], catFeatureXYnum[j])
dropFeatureCat.append(highSignificanceColName)
break
catFeature = list(set(catFeatureXYnum) - set(dropFeatureCat))
dropFeatureCat.clear()
featureDict.clear()
if len(quantFeatures) !=0:
# input vs target
# preason/spearman/ols
for col in quantFeatures:
pearson_corr = pearsonr(dataframe[col], targetData)
coef = round(pearson_corr[0],5)
p_value = round(pearson_corr[1],5)
if coef > -corrThTarget and coef < corrThTarget:
spearman_corr = spearmanr(dataframe[col], targetData)
coef = round(spearman_corr[0],5)
p_value = round(spearman_corr[1],5)
if coef > -corrThTarget and coef < corrThTarget:
#f = target+' ~ '+col
#reg = smf.ols(formula=f, data=dataframe).fit()
tmpdf = pd.DataFrame({'x':dataframe[col], 'y':dataframe[target]})
reg = smf.ols('y~x', data=tmpdf).fit()
if len(reg.pvalues) > 1 and reg.pvalues[1] <= pValThTarget:
numFeatureXYnum.append(col) # numeric feature xx when target is numeric
featureDict[col]=reg.pvalues[1]
else:
numFeatureXYnum.append(col)
featureDict[col]=p_value
else:
numFeatureXYnum.append(col)
featureDict[col]=p_value
#input vs input
# preason/spearman/ols
if len(numFeatureXYnum) != 0:
df_xx = dataframe[numFeatureXYnum]
rows, cols = df_xx.shape
flds = list(df_xx.columns)
corr_pearson = df_xx.corr(method='pearson').values
corr_spearman = df_xx.corr(method='spearman').values
for i in range(cols):
for j in range(i+1, cols):
if corr_pearson[i,j] > -corrThInput and corr_pearson[i,j] < corrThInput:
if corr_spearman[i,j] > -corrThInput and corr_spearman[i,j] < corrThInput:
#f = flds[i]+' ~ '+flds[j]
#reg = smf.ols(formula=f, data=dataframe).fit()
tmpdf = pd.DataFrame({'x':dataframe[flds[j]], 'y':dataframe[flds[i]]})
reg = smf.ols('y~x', data=tmpdf).fit()
if len(reg.pvalues) > 1 and reg.pvalues[1] > pValThInput:
highSignificanceColName = getHigherSignificanceColName(featureDict, flds[i], flds[j])
dropFeatureNum.append(highSignificanceColName)
                                            break
                                    else:
highSignificanceColName = getHigherSignificanceColName(featureDict, flds[i], flds[j])
dropFeatureNum.append(highSignificanceColName)
break
else:
highSignificanceColName = getHigherSignificanceColName(featureDict, flds[i], flds[j])
dropFeatureNum.append(highSignificanceColName)
break
numFeature = list(set(numFeatureXYnum) - set(dropFeatureNum))
featureDict.clear()
dropFeatureNum.clear()
impFeatures = numFeature+catFeature
hCorrFeatures=list(set(impFeatures))
return hCorrFeatures,targetType
except Exception as inst:
self.log.info( '\\n--> Failed calculating feature importance '+str(inst))
hCorrFeatures=[]
targetType=''
exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
self.log.info('\\n--> Taking all the features as highest correlation features')
hCorrFeatures = list(dataframe.columns)
return hCorrFeatures,targetType
def FFImp(self,df,contFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,dTypesDic,target,pValTh,corrTh,categoricalMaxLabel,problem_type,maxClasses):
'''
Input: dataframe, numeric continuous features, numeric discrete features
Output: feature importance dictionary
'''
try:
dataframe =df
contiFeatures= contFeatures
discreteFeatures = discreteFeatures
nonNumeric = nonNumericFeatures
categoricalFeatures=categoricalFeatures
self.dTypesDic = dTypesDic
numericFeatures = contiFeatures + discreteFeatures+categoricalFeatures
quantFeatures=discreteFeatures+contiFeatures
scorrDict={}
fScoreDict={}
pcorrDict={}
miDict={}
targetData=dataframe[target]
data=dataframe[numericFeatures]
nUnique=len(targetData.unique().tolist())
nRows=targetData.shape[0]
'''
print("\\n ===> nUnique :")
print(nUnique)
print("\\n ===> nRows :")
print(nRows)
print("\\n ===> cFRatio :")
print(cFRatio)
print("\\n ===> nUnique/nRows :")
'''
#calratio = nUnique
self.log.info( '-------> Target Column Unique Stats: '+str(nUnique)+' nRows: '+str(nRows)+' Unique:'+str(nUnique))
#sys.exit()
if nUnique <= categoricalMaxLabel:
targetType="categorical"
else:
targetType="continuous"
if problem_type.lower() == 'classification' and targetType == 'continuous':
targetType = 'categorical'
self.log.info( '-------> Change Target Type to Categorical as user defined')
if problem_type.lower() == 'regression' and targetType == 'categorical':
targetType = 'continuous'
self.log.info( '-------> Change Target Type to Continuous as user defined')
self.log.info( '-------> Target Type: '+str(targetType))
impFeatures=[]
featureImpDict={}
if targetType =="categorical":
try:
if len(categoricalFeatures) !=0:
categoricalData=dataframe[categoricalFeatures]
chiSqCategorical=chi2(categoricalData,targetData)[1]
corrSeries=pd.Series(chiSqCategorical, index=categoricalFeatures)
impFeatures.append(corrSeries[corrSeries<pValTh].index.tolist())
corrDict=corrSeries.to_dict()
featureImpDict['chiSquaretestPValue']=corrDict
except Exception as inst:
self.log.info("Found negative values in categorical variables "+str(inst))
if len(quantFeatures) !=0:
try:
quantData=dataframe[quantFeatures]
fclassScore=f_classif(quantData,targetData)[1]
miClassScore=mutual_info_classif(quantData,targetData)
fClassSeries=pd.Series(fclassScore,index=quantFeatures)
miClassSeries=pd.Series(miClassScore,index=quantFeatures)
impFeatures.append(fClassSeries[fClassSeries<pValTh].index.tolist())
impFeatures.append(miClassSeries[miClassSeries>corrTh].index.tolist())
featureImpDict['anovaPValue']=fClassSeries.to_dict()
featureImpDict['MIScore']=miClassSeries.to_dict()
except MemoryError as inst:
self.log.info( '-------> MemoryError in feature selection. '+str(inst))
pearsonScore=dataframe.corr()
targetPScore=abs(pearsonScore[target])
impFeatures.append(targetPScore[targetPScore<pValTh].index.tolist())
featureImpDict['pearsonCoff']=targetPScore.to_dict()
hCorrFeatures=list(set(sum(impFeatures, [])))
else:
if len(quantFeatures) !=0:
try:
quantData =dataframe[quantFeatures]
fregScore=f_regression(quantData,targetData)[1]
miregScore=mutual_info_regression(quantData,targetData)
fregSeries=pd.Series(fregScore,index=quantFeatures)
miregSeries=pd.Series(miregScore,index=quantFeatures)
impFeatures.append(fregSeries[fregSeries<pValTh].index.tolist())
impFeatures.append(miregSeries[miregSeries>corrTh].index.tolist())
featureImpDict['anovaPValue']=fregSeries.to_dict()
featureImpDict['MIScore']=miregSeries.to_dict()
except MemoryError as inst:
self.log.info( '-------> MemoryError in feature selection. '+str(inst))
pearsonScore=dataframe.corr()
targetPScore=abs(pearsonScore[target])
impFeatures.append(targetPScore[targetPScore<pValTh].index.tolist())
featureImpDict['pearsonCoff']=targetPScore.to_dict()
hCorrFeatures=list(set(sum(impFeatures, [])))
return hCorrFeatures,targetType
except Exception as inst:
self.log.info( '\\n--> Failed calculating feature importance '+str(inst))
hCorrFeatures=[]
targetType=''
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
return hCorrFeatures,targetType
'''
Importance degree
Computes set of relational parameters
pearson correlation, mutual information
'''
def importanceDegree(self,dataframe,feature1,feature2):
try:
tempList = []
#Parameter 1: pearson correlation
pcorr = self.pearsonCoff(dataframe,feature1,feature2)
tempList.append(pcorr)
#Parameter 2: mutual information
#Testing
mi = self.mutualInfo(dataframe,feature1,feature2,self.dTypesDic)
tempList.append(mi)
#return the highest parameter
return np.max(tempList)
except:
return 0.0
'''
Compute pearson correlation
'''
def pearsonCoff(self,dataframe,feature1,feature2):
try:
value=dataframe[feature1].corr(dataframe[feature2])
return np.abs(value)
except:
return 0.0
'''
Compute mutual information
'''
def mutualInfo(self,dataframe,feature1,feature2,typeDic):
try:
numType = {'int64': 'discrete','int32' : 'discrete','int16' : 'discrete','float16' : 'continuous','float32' : 'continuous','float64' : 'continuous'}
featureType1 = numType[typeDic[feature1]]
featureType2 = numType[typeDic[feature2]]
bufferList1=dataframe[feature1].values.tolist()
bufferList2=dataframe[feature2].values.tolist()
#Case 1: Only if both are discrete
if(featureType1 == 'discrete' and featureType2 == 'discrete'):
tempResult = self.discreteMI(bufferList1,bufferList2)
return np.mean(tempResult)
#Case 2: feature1 continuous and feature2 discrete -> classification MI
elif(featureType1 == 'continuous' and featureType2 == 'discrete'):
tempResult = self.categoricalMI(bufferList1,bufferList2)
return np.mean(tempResult)
else:
tempResult = self.continuousMI(bufferList1,bufferList2)
return np.mean(tempResult)
except:
return 0.0
def continuousMI(self,bufferList1,bufferList2):
mi = 0.0
#Using mutual info regression from feature selection
mi = mutual_info_regression(self.vec(bufferList1),bufferList2)
return mi
def categoricalMI(self,bufferList1,bufferList2):
mi = 0.0
#Using mutual info classification from feature selection
mi = mutual_info_classif(self.vec(bufferList1),bufferList2)
return mi
def discreteMI(self,bufferList1,bufferList2):
mi = 0.0
#Using scikit normalized mutual information function
mi = normalized_mutual_info_score(bufferList1,bufferList2)
return mi
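# vec() reshapes a 1-D sequence into a single-column 2-D list, the X shape expected by sklearn's
# mutual_info_regression / mutual_info_classif used above.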
def vec(self,x):
return [[i] for i in x]
<s>
import os
import sys
import re
import json
import subprocess
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import mpld3
from appbe.eda import ux_eda
from sklearn.preprocessing import LabelEncoder
from aif360.datasets import StandardDataset
from aif360.metrics import ClassificationMetric
from aif360.datasets import BinaryLabelDataset
def get_metrics(request):
dataFile = os.path.join(request.session['deploypath'], "data", "preprocesseddata.csv.gz")
predictionScriptPath = os.path.join(request.session['deploypath'], 'aion_predict.py')
displaypath = os.path.join(request.session['deploypath'], "etc", "display.json")
f = open(displaypath, "r")
configSettings = f.read()
f.close()
configSettings = json.loads(configSettings)
Target_feature = configSettings['targetFeature']
outputStr = subprocess.check_output([sys.executable, predictionScriptPath, dataFile])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'predictions:(.*)', str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
predict_dict = json.loads(outputStr)
df = pd.read_csv(dataFile)
df_p = pd.DataFrame.from_dict(predict_dict['data'])
d3_url = request.GET.get('d3_url')
mpld3_url = request.GET.get('mpld3_url')
df_temp = request.GET.get('feature')
global metricvalue
metricvalue = request.GET.get('metricvalue')
Protected_feature = df_temp
df_p = df_p.drop(columns=[Target_feature, 'remarks', 'probability'])
df_p.rename(columns={'prediction': Target_feature}, inplace=True)
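# df holds the ground-truth data and df_p the model predictions relabelled under the target column,
# presumably so both can be wrapped as AIF360 label datasets and compared by the fairness metrics below.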
eda_obj = ux_eda(dataFile, optimize=1)
features,dateFeature,seqFeature,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catfeatures = eda_obj.getFeatures()
features_to_Encode = features
categorical_names = {}
encoders = {}
for feature in features_to_Encode:
le = LabelEncoder()
le.fit(df[feature])
df[feature] = le.transform(df[feature])
le.fit(df_p[feature])
df_p[feature] = le.transform(df_p[feature])
categorical_names[feature] = le.classes_
encoders[feature] = le
new_list = [item for item in categorical_names[Protected_feature] if not pd.isnull(item)]
class_size = len(new_list)
if class_size > 10:
return 'HeavyFeature'
metrics = fair_metrics(categorical_names |
satype.lower() == 'first':
S = Si['S1']
else:
S = Si['ST']
return S
except Exception as e:
print('Error in calculating Si for Regression: ', str(e))
raise ValueError(str(e))
def plotSi(self, S, saType):
try:
import matplotlib.pyplot as plt
if saType.lower() == 'first':
title, label = 'Sensitivity Analysis', 'First order'
else:
title, label = 'Sensitivity Analysis', 'Total order'
x = np.arange(len(self.problem['names']))
width = 0.35
fig, ax = plt.subplots()
ax.bar(x - width / 2, S, width, label=label)
ax.set_xticks(x)
ax.set_xlabel('Features')
ax.set_ylabel('Sensitivity Indices')
ax.set_title(title)
ax.set_xticklabels(self.problem['names'], rotation=45, ha="right")
ax.legend()
plt.tight_layout()
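# Serialize the figure to an in-memory PNG and base64-encode it as a data URI for embedding in the UI.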
image = io.BytesIO()
plt.savefig(image, format='png')
image.seek(0)
string = base64.b64encode(image.read())
SAimage = 'data:image/png;base64,' + urllib.parse.quote(string)
except Exception as e:
print(e)
SAimage = ''
return SAimage
def checkModelType(modelName):
isML= False
isDL = False
if modelName in ["Neural Network", "Convolutional Neural Network (1D)", "Recurrent Neural Network","Recurrent Neural Network (GRU)",
"Recurrent Neural Network (LSTM)", "Neural Architecture Search", "Deep Q Network", "Dueling Deep Q Network"]:
isDL = True
elif modelName in ["Linear Regression","Lasso","Ridge","Logistic Regression", "Naive Bayes", "Decision Tree", "Random Forest", "Support Vector Machine", "K Nearest Neighbors", "Gradient Boosting",
"Extreme Gradient Boosting (XGBoost)", "Light Gradient Boosting (LightGBM)", "Categorical Boosting (CatBoost)","Bagging (Ensemble)"]:
isML = True
return isML,isDL
def startSA(request):
try:
displaypath = os.path.join(request.session['deploypath'], "etc", "display.json")
if not os.path.exists(displaypath):
raise Exception('Config file not found.')
with open(displaypath) as file:
config = json.load(file)
probelmType = config['problemType']
if probelmType.lower() not in ['classification','regression']:
raise Exception(f"Problem Type: {probelmType} not supported")
isML,isDL = checkModelType(config['modelname'])
sample_size = 1024
if isML:
model = joblib.load(os.path.join(request.session['deploypath'], 'model', config['saved_model']))
sample_size = 2048
if isDL:
from tensorflow.keras.models import load_model
model = load_model(os.path.join(request.session['deploypath'], 'model', config['saved_model']))
sample_size = 512
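# Sample budget for the sensitivity analysis; deep-learning models get a smaller budget, presumably
# to keep inference time manageable.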
target = config['targetFeature']
featureName = config['modelFeatures']
dataPath = os.path.join(request.session['deploypath'], 'data', 'postprocesseddata.csv.gz')
if not os.path.exists(dataPath):
raise Exception('Data file not found.')
from utils.file_ops import read_df_compressed
read_status,dataFrame = read_df_compressed(dataPath)
obj = sensitivityAnalysis(model, probelmType, dataFrame, target, featureName)
obj.preprocess()
obj.generate_samples(sample_size)
submitType = str(request.GET.get('satype'))
saType = 'first' if submitType == 'first' else 'total'
if probelmType.lower() == 'classification':
SA_values = obj.calSiClass(saType,isML,isDL)
else:
SA_values = obj.calSiReg(saType,isML,isDL)
if SA_values.size and saType:
graph = obj.plotSi(SA_values, saType)
if graph:
outputJson = {'Status': "Success", "graph": graph}
else:
outputJson = {'Status': "Error", "graph": '','reason':'Error in Plotting Graph'}
else:
outputJson = {'Status': "Error", "graph": '','reason':'Error in calculating Si values'}
output_json = json.dumps(outputJson)
return output_json
except Exception as e:
print(str(e))
raise ValueError(str(e))
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> import joblib
import sys
import math
import time
import argparse
import json
import pandas as pd
import numpy as np
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
def mltesting(modelfile,datafile,features,target):
model = joblib.load(modelfile)
ProblemName = model.__class__.__name__
if ProblemName in ['LogisticRegression','SGDClassifier','SVC','DecissionTreeClassifier','RandomForestClassifier','GaussianNB','KNeighborsClassifier','DecisionTreeClassifier','GradientBoostingClassifier','XGBClassifier','LGBMClassifier','CatBoostClassifier']:
Problemtype = 'Classification'
elif ProblemName in ['LinearRegression','Lasso','Ridge','DecisionTreeRegressor','RandomForestRegressor','GradientBoostingRegressor','XGBRegressor','LGBMRegressor','CatBoostRegressor']:
Problemtype = 'Regression'
else:
Problemtype = 'Unknown'
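# The problem type is inferred purely from the estimator class name; anything outside the two lists
# above is reported as unsupported.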
if Problemtype == 'Classification':
Params = model.get_params()
try:
df = pd.read_csv(datafile,encoding='utf-8',skipinitialspace = True)
if ProblemName == 'LogisticRegression' or ProblemName == 'DecisionTreeClassifier' or ProblemName == 'RandomForestClassifier' or ProblemName == 'GaussianNB' or ProblemName == 'KNeighborsClassifier' or ProblemName == 'GradientBoostingClassifier' or ProblemName == 'SVC':
features = model.feature_names_in_
elif ProblemName == 'XGBClassifier':
features = model.get_booster().feature_names
elif ProblemName == 'LGBMClassifier':
features = model.feature_name_
elif ProblemName == 'CatBoostClassifier':
features = model.feature_names_
modelfeatures = features
dfp = df[modelfeatures]
tar = target
target = df[tar]
predic = model.predict(dfp)
output = {}
matrixconfusion = pd.DataFrame(confusion_matrix(predic,target))
matrixconfusion = matrixconfusion.to_json(orient='index')
classificationreport = pd.DataFrame(classification_report(target,predic,output_dict=True)).transpose()
classificationreport = round(classificationreport,2)
classificationreport = classificationreport.to_json(orient='index')
output["Precision"] = "%.2f" % precision_score(target, predic,average='weighted')
output["Recall"] = "%.2f" % recall_score(target, predic,average='weighted')
output["Accuracy"] = "%.2f" % accuracy_score(target, predic)
output["ProblemName"] = ProblemName
output["Status"] = "Success"
output["Params"] = Params
output["Problemtype"] = Problemtype
output["Confusionmatrix"] = matrixconfusion
output["classificationreport"] = classificationreport
# import statistics
# timearray = []
# for i in range(0,5):
# start = time.time()
# predic1 = model.predict(dfp.head(1))
# end = time.time()
# timetaken = (round((end - start) * 1000,2),'Seconds')
# timearray.append(timetaken)
# print(timearray)
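# Rough latency benchmark: total wall-clock time (in milliseconds) of five repeated predictions on
# 1, 10 and 100 rows respectively.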
start = time.time()
for i in range(0,5):
predic1 = model.predict(dfp.head(1))
end = time.time()
timetaken = (round((end - start) * 1000,2),'Milliseconds')
# print(timetaken)
start1 = time.time()
for i in range(0,5):
predic2 = model.predict(dfp.head(10))
end1 = time.time()
timetaken1 = (round((end1 - start1) * 1000,2) ,'Milliseconds')
# print(timetaken1)
start2 = time.time()
for i in range(0,5):
predic3 = model.predict(dfp.head(100))
end2 = time.time()
timetaken2 = (round((end2 - start2) * 1000,2) ,'Milliseconds')
# print(timetaken2)
output["onerecord"] = timetaken
output["tenrecords"] = timetaken1
output["hundrecords"] = timetaken2
print(json.dumps(output))
except Exception as e:
output = {}
output['Problemtype']='Classification'
output['Status']= "Fail"
output["ProblemName"] = ProblemName
output["Msg"] = 'Detected Model : {} \\\\n Problem Type : Classification \\\\n Error : {}'.format(ProblemName, str(e).replace('"','//"').replace('\\n', '\\\\n'))
print(output["Msg"])
print(json.dumps(output))
elif Problemtype == 'Regression':
Params = model.get_params()
try:
df = pd.read_csv(datafile,encoding='utf-8',skipinitialspace = True)
if ProblemName == 'LinearRegression' or ProblemName == 'Lasso' or ProblemName == 'Ridge' or ProblemName == 'DecisionTreeRegressor' or ProblemName == 'RandomForestRegressor' or ProblemName == 'GaussianNB' or ProblemName == 'KNeighborsRegressor' or ProblemName == 'GradientBoostingRegressor':
features = model.feature_names_in_
elif ProblemName == 'XGBRegressor':
features = model.get_booster().feature_names
elif ProblemName == 'LGBMRegressor':
features = model.feature_name_
elif ProblemName == 'CatBoostRegressor':
features = model.feature_names_
modelfeatures = features
dfp = df[modelfeatures]
tar = target
target = df[tar]
predict = model.predict(dfp)
mse = mean_squared_error(target, predict)
mae = mean_absolute_error(target, predict)
rmse = math.sqrt(mse)
r2 = r2_score(target,predict,multioutput='variance_weighted')
output = {}
output["MSE"] = "%.2f" % mean_squared_error(target, predict)
output["MAE"] = "%.2f" % mean_absolute_error(target, predict)
output["RMSE"] = "%.2f" % math.sqrt(mse)
output["R2"] = "%.2f" %r2_score(target,predict,multioutput='variance_weighted')
output["ProblemName"] = ProblemName
output["Problemtype"] = Problemtype
output["Params"] = Params
output['Status']='Success'
start = time.time()
predic1 = model.predict(dfp.head(1))
end = time.time()
timetaken = (round((end - start) * 1000,2) ,'Milliseconds')
# print(timetaken)
start1 = time.time()
predic2 = model.predict(dfp.head(10))
end1 = time.time()
timetaken1 = (round((end1 - start1) * 1000,2),'Milliseconds')
# print(timetaken1)
start2 = time.time()
predic3 = model.predict(dfp.head(100))
end2 = time.time()
timetaken2 = (round((end2 - start2) * 1000,2) ,'Milliseconds')
# print(timetaken2)
output["onerecord"] = timetaken
output["tenrecords"] = timetaken1
output["hundrecords"] = timetaken2
print(json.dumps(output))
except Exception as e:
output = {}
output['Problemtype']='Regression'
output['Status']='Fail'
output["ProblemName"] = ProblemName
output["Msg"] = 'Detected Model : {} \\\\n Problem Type : Regression \\\\n Error : {}'.format(ProblemName, str(e).replace('"','//"').replace('\\n', '\\\\n'))
print(json.dumps(output))
else:
output = {}
output['Problemtype']='Unknown'
output['Status']='Fail'
output['Params'] = ''
output["ProblemName"] = ProblemName
output["Msg"] = 'Detected Model : {} \\\\n Error : {}'.format(ProblemName, 'Model not supported')
print(json.dumps(output))
return(json.dumps(output))
def baseline_testing(modelFile,csvFile,features,target):
features = [x.strip() for x in features.split(',')]
return mltesting(modelFile,csvFile,features,target)<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import warnings
import numpy as np
import pandas as pd
import sklearn.metrics as metrics
from collections import defaultdict
from sklearn.metrics import confusion_matrix
import re
import shutil
import scipy.stats as st
import json
import os,sys
import glob
import logging
from utils.file_ops import read_df_compressed
class Visualization():
def __init__(self,usecasename,version,dataframe,visualizationJson,dateTimeColumn,deployPath,dataFolderLocation,numericContinuousFeatures,discreteFeatures,categoricalFeatures,modelFeatures,targetFeature,modeltype,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,labelMaps,vectorizerFeatures,textFeatures,numericalFeatures,nonNumericFeatures,emptyFeatures,nrows,ncols,saved_model,scoreParam,learner_type,modelname,featureReduction,reduction_data_file):
self.dataframe = dataframe
self.displayjson = {}
self.visualizationJson = visualizationJson
self.dateTimeColumn = dateTimeColumn
self.deployPath = deployPath
#shutil.copy2(os.path.join(os.path.dirname(os.path.abspath(__file__)),'aion_portal.py'),self.deployPath)
if learner_type == 'ML' and modelname != 'Neural Architecture Search':
if(os.path.isfile(os.path.join(self.deployPath,'explainable_ai.py'))):
os.remove(os.path.join(self.deployPath,'explainable_ai.py'))
shutil.copy2(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','utilities','xai','explainable_ai.py'),self.deployPath)
# os.rename(os.path.join(self.deployPath,'explainable_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
try:
os.rename(os.path.join(self.deployPath,'explainable_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
except FileExistsError:
os.remove(os.path.join(self.deployPath,'aion_xai.py'))
os.rename(os.path.join(self.deployPath,'explainable_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
elif learner_type == 'DL' or modelname == 'Neural Architecture Search':
if(os.path.isfile(os.path.join(self.deployPath,'explainable_ai.py'))):
os.remove(os.path.join(self.deployPath,'explainable_ai.py'))
shutil.copy2(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','utilities','xai','explainabledl_ai.py'),self.deployPath)
# os.rename(os.path.join(self.deployPath,'explainabledl_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
try:
os.rename(os.path.join(self.deployPath,'explainabledl_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
except FileExistsError:
os.remove(os.path.join(self.deployPath,'aion_xai.py'))
os.rename(os.path.join(self.deployPath,'explainabledl_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
self.jsondeployPath = deployPath
#self.deployPath = self.deployPath+'visualization/'
self.dataFolderLocation = dataFolderLocation
self.vectorizerFeatures = vectorizerFeatures
self.textFeatures = textFeatures
self.emptyFeatures = emptyFeatures
'''
try:
os.makedirs(self.deployPath)
except OSError as e:
print("\\nFolder Already Exists")
'''
self.numericContinuousFeatures = numericContinuousFeatures
self.discreteFeatures = discreteFeatures
self.categoricalFeatures = categoricalFeatures
self.modelFeatures = modelFeatures
self.modeltype = modeltype
self.targetFeature = targetFeature
self.displayjson['usecasename'] = str(usecasename)
self.displayjson['version'] = str(version)
self.displayjson['problemType'] = str(self.modeltype)
self.displayjson['targetFeature'] = self.targetFeature
self.displayjson['numericalFeatures'] = numericalFeatures
self.displayjson['nonNumericFeatures'] = nonNumericFeatures
self.displayjson['modelFeatures'] = self.modelFeatures
self.displayjson['textFeatures'] = self.textFeatures
self.displayjson['emptyFeatures'] = self.emptyFeatures
self.displayjson['modelname']= str(modelname)
self.displayjson['preprocessedData'] = str(original_data_file)
self.displayjson['nrows'] = str(nrows)
self.displayjson['ncols'] = str(ncols)
self.displayjson['saved_model'] = str(saved_model)
self.displayjson['scoreParam'] = str(scoreParam)
self.displayjson['labelMaps'] = eval(str(labelMaps))
self.original_data_file = original_data_file
self.displayjson['featureReduction'] = featureReduction
if featureReduction == 'True':
self.displayjson['reduction_data_file'] = reduction_data_file
else:
self.displayjson['reduction_data_file'] = ''
self.pred_filename = predicted_data_file
self.profiled_data_file = profiled_data_file
self.displayjson['predictedData'] = predicted_data_file
self.displayjson['postprocessedData'] = profiled_data_file
#self.trained_data_file = trained_data_file
#self.displayjson['trainingData'] = trained_data_file
#self.displayjson['categorialFeatures']=categoricalFeatures
#self.displayjson['discreteFeatures']=discreteFeatures
#self.displayjson['continuousFeatures']=numericContinuousFeatures
#y = json.dumps(self.displayjson)
#print(y)
self.labelMaps = labelMaps
self.log = logging.getLogger('eion')
def visualizationrecommandsystem(self):
try:
import tensorflow.keras.utils as kutils
datasetid = self.visualizationJson['datasetid']
self.log.info('\\n================== Data Profiling Details==================')
datacolumns=list(self.dataframe.columns)
self.log.info('================== Data Profiling Details End ==================\\n')
self.log.info('================== Features Correlation Details ==================\\n')
self.log.info('\\n================== Model Performance Analysis ==================')
if os.path.exists(self.pred_filename):
try:
status,df=read_df_compressed(self.pred_filename)
if self.modeltype == 'Classification' or self.modeltype == 'ImageClassification' or self.modeltype == 'anomaly_detection':
y_actual = df['actual'].values
y_predict = df['predict'].values
y_actual = kutils.to_categorical(y_actual)
y_predict = kutils.to_categorical(y_predict)
classes = df.actual.unique()
n_classes = y_actual.shape[1]
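# Actual and predicted labels are one-hot encoded so per-class ROC and precision-recall curves can be
# computed column by column.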
self.log.info('-------> ROC AUC CURVE')
roc_curve_dict = []
for i in classes:
try:
classname = i
if str(self.labelMaps) != '{}':
inv_map = {v: k for k, v in self.labelMaps.items()}
classname = inv_map[i]
fpr, tpr, threshold = metrics.roc_curve(y_actual[:,i],y_predict[:,i])
roc_auc = metrics.auc(fpr, tpr)
class_roc_auc_curve = {}
class_roc_auc_curve['class'] = str(classname)
fprstring = ','.join(str(v) for v in fpr)
tprstring = ','.join(str(v) for v in tpr)
class_roc_auc_curve['FP'] = str(fprstring)
class_roc_auc_curve['TP'] = str(tprstring)
roc_curve_dict.append(class_roc_auc_curve)
self.log.info('----------> Class: '+str(classname))
self.log.info('------------> ROC_AUC: '+str(roc_auc))
self.log.info('------------> False Positive Rate (x Points): '+str(fpr))
self.log.info('------------> True Positive Rate (y Points): '+str(tpr))
except:
pass
self.displayjson['ROC_AUC_CURVE'] = roc_curve_dict
self.log.info('-------> Precision Recall CURVE')
precision_recall_curve_dict = []
for i in range(n_classes):
try:
lr_precision, lr_recall, threshold = metrics.precision_recall_curve(y_actual[:,i],y_predict[:,i])
classname = i
if str(self.labelMaps) != '{}':
inv_map = {v: k for k, v in self.labelMaps.items()}
classname = inv_map[i]
roc_auc = metrics.auc(lr_recall,lr_precision)
class_precision_recall_curve = {}
class_precision_recall_curve['class'] = str(classname)
Precisionstring = ','.join(str(round(v,2)) for v in lr_precision)
Recallstring = ','.join(str(round(v,2)) for v in lr_recall)
class_precision_recall_curve['Precision'] = str(Precisionstring)
class_precision_recall_curve['Recall'] = str(Recallstring)
precision_recall_curve_dict.append(class_precision_recall_curve)
except:
pass
self.log.info('----------> Class: '+str(classname))
self.log.info('------------> ROC_AUC: '+str(roc_auc))
self.log.info('------------> Recall (x Points): '+str(lr_precision))
self.log.info('------------> Precision (y Points): '+str(lr_recall))
self.displayjson['PRECISION_RECALL_CURVE'] = precision_recall_curve_dict
status,predictdataFrame=read_df_compressed(self.displayjson['predictedData'])
except Exception as e:
self.log.info('================== Error in Calculation ROC_AUC/Recall Precision Curve '+str(e))
self.log.info('================== Model Performance Analysis End ==================\\n')
self.log.info('\\n================== For Descriptive Analysis of Model Features ==================')
outputfile = os.path.join(self.jsondeployPath,'etc','display.json')
with open(outputfile, 'w') as fp:
json.dump(self.displayjson, fp)
self.log.info('================== For Descriptive Analysis of Model Features End ==================\\n')
except Exception as inst:
self.log.info('Visualization Failed !....'+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
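# The draw*chart helpers below emit Kibana 6.7 saved-object JSON (visState embedded as an escaped
# string) so the generated charts can be imported into a Kibana dashboard.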
def drawlinechart(self,xcolumn,ycolumn,deploy_path,datasetid):
title = 'aion_visualization_'+xcolumn+"_"+ycolumn+"_linechart"
yaxisname = 'Average '+ycolumn
datasetindex = datasetid
visulizationjson = '[{"_id": "543234","_type": "visualization","_source": {"title": "'+title+'",'
visulizationjson = visulizationjson+'"visState": "{\\\\"title\\\\":\\\\"'+title+'\\\\",'
visulizationjson = visulizationjson+'\\\\"type\\\\":\\\\"line\\\\",\\\\"params\\\\":{\\\\"type\\\\":\\\\"line\\\\",\\\\"grid\\\\":{\\\\"categoryLines\\\\":false,\\\\"style\\\\":{\\\\"color\\\\":\\\\"#eee\\\\"}},\\\\"categoryAxes\\\\":[{\\\\"id\\\\":\\\\"CategoryAxis-1\\\\",\\\\"type\\\\":\\\\"category\\\\",\\\\"position\\\\":\\\\"bottom\\\\",\\\\"show\\\\":true,\\\\"style\\\\":{},\\\\"scale\\\\":{\\\\"type\\\\":\\\\"linear\\\\"},\\\\"labels\\\\":{\\\\"show\\\\":true,\\\\"truncate\\\\":100},\\\\"title\\\\":{}}],\\\\"valueAxes\\\\":[{\\\\"id\\\\":\\\\"ValueAxis-1\\\\",\\\\"name\\\\":\\\\"LeftAxis-1\\\\",\\\\"type\\\\":\\\\"value\\\\",\\\\"position\\\\":\\\\"left\\\\",\\\\" |
show\\\\":true,\\\\"style\\\\":{},\\\\"scale\\\\":{\\\\"type\\\\":\\\\"linear\\\\",\\\\"mode\\\\":\\\\"normal\\\\"},\\\\"labels\\\\":{\\\\"show\\\\":true,\\\\"rotate\\\\":0,\\\\"filter\\\\":false,\\\\"truncate\\\\":100},\\\\"title\\\\":'
visulizationjson = visulizationjson+'{\\\\"text\\\\":\\\\"'+yaxisname+'\\\\"}}],\\\\"seriesParams\\\\":[{\\\\"show\\\\":\\\\"true\\\\",\\\\"type\\\\":\\\\"line\\\\",\\\\"mode\\\\":\\\\"normal\\\\",\\\\"data\\\\":'
visulizationjson = visulizationjson+'{\\\\"label\\\\":\\\\"'+yaxisname+'\\\\",\\\\"id\\\\":\\\\"1\\\\"},\\\\"valueAxis\\\\":\\\\"ValueAxis-1\\\\",\\\\"drawLinesBetweenPoints\\\\":true,\\\\"showCircles\\\\":true}],\\\\"addTooltip\\\\":true,\\\\"addLegend\\\\":true,\\\\"legendPosition\\\\":\\\\"right\\\\",\\\\"times\\\\":[],\\\\"addTimeMarker\\\\":false},\\\\"aggs\\\\":[{\\\\"id\\\\":\\\\"1\\\\",\\\\"enabled\\\\":true,\\\\"type\\\\":\\\\"avg\\\\",\\\\"schema\\\\":\\\\"metric\\\\",\\\\"params\\\\":{\\\\"field\\\\":\\\\"'+str(ycolumn)+'\\\\"}},{\\\\"id\\\\":\\\\"2\\\\",\\\\"enabled\\\\":true,\\\\"type\\\\":\\\\"terms\\\\",\\\\"schema\\\\":\\\\"segment\\\\",\\\\"params\\\\":{\\\\"field\\\\":\\\\"'+xcolumn+'\\\\",\\\\"size\\\\":100,\\\\"order\\\\":\\\\"desc\\\\",\\\\"orderBy\\\\":\\\\"1\\\\",\\\\"otherBucket\\\\":false,\\\\"otherBucketLabel\\\\":\\\\"Other\\\\",\\\\"missingBucket\\\\":false,\\\\"missingBucketLabel\\\\":\\\\"Missing\\\\"}}]}","uiStateJSON": "{}", "description": "","version": 1,"kibanaSavedObjectMeta": {"searchSourceJSON": "{\\\\"index\\\\":\\\\"'+datasetindex+'\\\\",\\\\"query\\\\":{\\\\"query\\\\":\\\\"\\\\",\\\\"language\\\\":\\\\"lucene\\\\"},\\\\"filter\\\\":[]}"}},"_migrationVersion": {"visualization": "6.7.2"}}]'
filename = deploy_path+title+'.json'
f = open(filename, "w")
f.write(str(visulizationjson))
f.close()
def drawbarchart(self,xcolumn,ycolumn,deploy_path,datasetid):
title = 'aion_visualization_'+xcolumn+"_"+ycolumn+"_barchart"
yaxisname = 'Average '+ycolumn
datasetindex = datasetid
visulizationjson = '[{"_id": "123456","_type": "visualization","_source": {"title":"'+title+'",'
visulizationjson = visulizationjson+'"visState": "{\\\\"title\\\\":\\\\"'+title+'\\\\",'
visulizationjson = visulizationjson+'\\\\"type\\\\":\\\\"histogram\\\\",\\\\"params\\\\":{\\\\"addLegend\\\\":true,\\\\"addTimeMarker\\\\":false,\\\\"addTooltip\\\\":true,\\\\"categoryAxes\\\\":[{\\\\"id\\\\":\\\\"CategoryAxis-1\\\\",\\\\"labels\\\\":{\\\\"show\\\\":true,\\\\"truncate\\\\":100},\\\\"position\\\\":\\\\"bottom\\\\",\\\\"scale\\\\":{\\\\"type\\\\":\\\\"linear\\\\"},\\\\"show\\\\":true,\\\\"style\\\\":{},\\\\"title\\\\":{},\\\\"type\\\\":\\\\"category\\\\"}],\\\\"grid\\\\":{\\\\"categoryLines\\\\":false,\\\\"style\\\\":{\\\\"color\\\\":\\\\"#eee\\\\"}},\\\\"legendPosition\\\\":\\\\"right\\\\",\\\\"seriesParams\\\\":[{\\\\"data\\\\":{\\\\"id\\\\":\\\\"1\\\\",'
visulizationjson = visulizationjson+'\\\\"label\\\\":\\\\"'+yaxisname+'\\\\"},'
visulizationjson = visulizationjson+'\\\\"drawLinesBetweenPoints\\\\":true,\\\\"mode\\\\":\\\\"stacked\\\\",\\\\"show\\\\":\\\\"true\\\\",\\\\"showCircles\\\\":true,\\\\"type\\\\":\\\\"histogram\\\\",\\\\"valueAxis\\\\":\\\\"ValueAxis-1\\\\"}],\\\\"times\\\\":[],\\\\"type\\\\":\\\\"histogram\\\\",\\\\"valueAxes\\\\":[{\\\\"id\\\\":\\\\"ValueAxis-1\\\\",\\\\"labels\\\\":{\\\\"filter\\\\":false,\\\\"rotate\\\\":0,\\\\"show\\\\":true,\\\\"truncate\\\\":100},\\\\"name\\\\":\\\\"LeftAxis-1\\\\",\\\\"position\\\\":\\\\"left\\\\",\\\\"scale\\\\":{\\\\"mode\\\\":\\\\"normal\\\\",\\\\"type\\\\":\\\\"linear\\\\"},\\\\"show\\\\":true,\\\\"style\\\\":{},\\\\"title\\\\":'
visulizationjson = visulizationjson+'{\\\\"text\\\\":\\\\"'+yaxisname+'\\\\"},'
visulizationjson = visulizationjson+'\\\\"type\\\\":\\\\"value\\\\"}]},\\\\"aggs\\\\":[{\\\\"id\\\\":\\\\"1\\\\",\\\\"enabled\\\\":true,\\\\"type\\\\":\\\\"avg\\\\",\\\\"schema\\\\":\\\\"metric\\\\",\\\\"params\\\\":{\\\\"field\\\\":\\\\"'+str(xcolumn)+'\\\\"}},{\\\\"id\\\\":\\\\"2\\\\",\\\\"enabled\\\\":true,\\\\"type\\\\":\\\\"terms\\\\",\\\\"schema\\\\":\\\\"segment\\\\",\\\\"params\\\\":{\\\\"field\\\\":\\\\"'+ycolumn+'\\\\",\\\\"size\\\\":100,\\\\"order\\\\":\\\\"asc\\\\",\\\\"orderBy\\\\":\\\\"1\\\\",\\\\"otherBucket\\\\":false,\\\\"otherBucketLabel\\\\":\\\\"Other\\\\",\\\\"missingBucket\\\\":false,\\\\"missingBucketLabel\\\\":\\\\"Missing\\\\"}}]}","uiStateJSON":"{}","description": "","version": 1,"kibanaSavedObjectMeta": {'
visulizationjson = visulizationjson+'"searchSourceJSON": "{\\\\"index\\\\":\\\\"'+datasetindex+'\\\\",\\\\"query\\\\":{\\\\"language\\\\":\\\\"lucene\\\\",\\\\"query\\\\":\\\\"\\\\"},\\\\"filter\\\\":[]}"}},"_migrationVersion":{"visualization": "6.7.2"}}]'
filename = deploy_path+title+'.json'
f = open(filename, "w")
f.write(str(visulizationjson))
f.close()
def drawpiechart(self,xcolumn,deploy_path,datasetid):
title = 'aion_visualization_'+xcolumn+"_piechart"
datasetindex = datasetid
visulizationjson = '[{"_id": "123456","_type": "visualization","_source": {"title":"'+title+'",'
visulizationjson = visulizationjson+'"visState": "{\\\\"title\\\\":\\\\"'+title+'\\\\",'
visulizationjson = visulizationjson+'\\\\"type\\\\":\\\\"pie\\\\",\\\\"params\\\\":{\\\\"type\\\\":\\\\"pie\\\\",\\\\"addTooltip\\\\":true,\\\\"addLegend\\\\":true,\\\\"legendPosition\\\\":\\\\"right\\\\",\\\\"isDonut\\\\":true,\\\\"labels\\\\":{\\\\"show\\\\":false,\\\\"values\\\\":true,\\\\"last_level\\\\":true,\\\\"truncate\\\\":100}},\\\\"aggs\\\\":[{\\\\"id\\\\":\\\\"1\\\\",\\\\"enabled\\\\":true,\\\\"type\\\\":\\\\"count\\\\",\\\\"schema\\\\":\\\\"metric\\\\",\\\\"params\\\\":{}},{\\\\"id\\\\":\\\\"2\\\\",\\\\"enabled\\\\":true,\\\\"type\\\\":\\\\"terms\\\\",\\\\"schema\\\\":\\\\"segment\\\\",\\\\"params\\\\":{\\\\"field\\\\":\\\\"'+xcolumn+'\\\\",\\\\"size\\\\":100,\\\\"order\\\\":\\\\"asc\\\\",\\\\"orderBy\\\\":\\\\"1\\\\",\\\\"otherBucket\\\\":false,\\\\"otherBucketLabel\\\\":\\\\"Other\\\\",\\\\"missingBucket\\\\":false,\\\\"missingBucketLabel\\\\":\\\\"Missing\\\\"}}]}",'
visulizationjson = visulizationjson+'"uiStateJSON": "{}","description": "","version": 1,"kibanaSavedObjectMeta": {"searchSourceJSON":"{\\\\"index\\\\":\\\\"'+datasetid+'\\\\",\\\\"query\\\\":{\\\\"query\\\\":\\\\"\\\\",\\\\"language\\\\":\\\\"lucene\\\\"},\\\\"filter\\\\":[]}"}},"_migrationVersion": {"visualization": "6.7.2"}}]'
filename = deploy_path+title+'.json'
f = open(filename, "w")
f.write(str(visulizationjson))
f.close()
def get_confusion_matrix(self,df):
setOfyTrue = set(df['actual'])
unqClassLst = list(setOfyTrue)
if(str(self.labelMaps) != '{}'):
inv_mapping_dict = {v: k for k, v in self.labelMaps.items()}
unqClassLst2 = (pd.Series(unqClassLst)).map(inv_mapping_dict)
unqClassLst2 = list(unqClassLst2)
else:
unqClassLst2 = unqClassLst
indexName = []
columnName = []
for item in unqClassLst2:
indexName.append("act:"+str(item))
columnName.append("pre:"+str(item))
result = pd.DataFrame(confusion_matrix(df['actual'], df['predict'], labels = unqClassLst),index = indexName, columns = columnName)
resultjson = result.to_json(orient='index')
return(resultjson)
def DistributionFinder(self,data):
try:
distributionName =""
sse =0.0
KStestStatic=0.0
dataType=""
if(data.dtype == "float64"):
dataType ="Continuous"
elif(data.dtype =="int" or data.dtype =="int64"):
dataType="Discrete"
if(dataType == "Discrete"):
distributions= [st.bernoulli,st.binom,st.geom,st.nbinom,st.poisson]
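# Each candidate discrete distribution is evaluated with crude moment-based parameter estimates; the
# distribution whose PMF gives the smallest sum of squared errors against the empirical frequencies wins.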
index, counts = np.unique(abs(data.astype(int)),return_counts=True)
if(len(index)>=2):
best_sse = np.inf
y1=[]
total=sum(counts)
mean=float(sum(index*counts))/total
variance=float((sum(index**2*counts) -total*mean**2))/(total-1)
dispersion=mean/float(variance)
theta=1/float(dispersion)
r=mean*(float(theta)/1-theta)
for j in counts:
y1.append(float(j)/total)
pmf1=st.bernoulli.pmf(index,mean)
pmf2=st.binom.pmf(index,len(index),p=mean/len(index))
pmf3=st.geom.pmf(index,1/float(1+mean))
pmf4=st.nbinom.pmf(index,mean,r)
pmf5=st.poisson.pmf(index,mean)
sse1 = np.sum(np.power(y1 - pmf1, 2.0))
sse2 = np.sum(np.power(y1 - pmf2, 2.0))
sse3 = np.sum(np.power(y1 - pmf3, 2.0))
sse4 = np.sum(np.power(y1 - pmf4, 2.0))
sse5 = np.sum(np.power(y1- pmf5, 2.0))
sselist=[sse1,sse2,sse3,sse4,sse5]
for i in range(0,len(sselist)):
if best_sse > sselist[i] > 0:
best_distribution = distributions[i].name
best_sse = sselist[i]
elif (len(index) == 1):
best_distribution = "Constant Data-No Distribution"
best_sse = 0.0
distributionName =best_distribution
sse=best_sse
elif(dataType == "Continuous"):
distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta]
best_distribution = st.norm.name
best_sse = np.inf
datamin=data.min()
datamax=data.max()
nrange=datamax-datamin
y, x = np.histogram(data.astype(float), bins='auto', density=True)
x = (x + np.roll(x, -1))[:-1] / 2.0
for distribution in distributions:
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
params = distribution.fit(data.astype(float))
# Separate parts of parameters
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Calculate fitted PDF and error with fit in distribution
pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
sse = np.sum(np.power(y - pdf, 2.0))
if(best_sse >sse > 0):
best_distribution = distribution.name
best_sse = sse
distributionName =best_distribution
sse=best_sse
except:
response = str(sys.exc_info()[0])
message='Job has Failed'+response
print(message)
return distributionName,sse
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pandas as pd
import matplotlib.pyplot as plt
from lifelines import KaplanMeierFitter, CoxPHFitter
from lifelines.utils import datetimes_to_durations
import logging
import numpy as np
import re
import sys
import os
class SurvivalAnalysis(object):
def __init__(self, df, pipe, method, event_column, duration_column, filterExpression, train_features_type,start=None, end=None):
pd.options.display.width = 30
self.df = df
self.pipe = pipe
self.train_features_type = train_features_type
self.filterExpression = filterExpression
self.covariateExpression = filterExpression
self.method = method
self.event_column = event_column
if start is not None and end is not None:
self.df['duration'], _ = datetimes_to_durations(start, end)
self.duration_column = 'duration'
else:
self.duration_column = duration_column
self.models = []
self.score = 0
self.log = logging.getLogger('eion')
self.plots = []
def transform_filter_expression(self, covariate, covariate_input):
'''
Filter expression given by user will be encoded if it is categorical and if it is a numerical feature that
is normalised in data profiler, in filter expression feature also it will be converted to normalised value
'''
cols = list(self.df.columns)
if self.duration_column in cols:
cols.remove(self.duration_column)
if self.event_column in cols:
cols.remove(self.event_column)
df_filter = pd.DataFrame([{covariate:covariate_input}], columns=cols)
df_filter[covariate] = df_filter[covariate].astype(self.train_features_type[covariate])
df_transform_array = self.pipe.transform(df_filter)
df_transform = pd.DataFrame(df_transform_array, columns=cols)
return df_transform[covariate].iloc[0]
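# Illustrative usage (hypothetical column name): transform_filter_expression('age', '60') returns the
# scaled/encoded representation of 60 produced by the training-time preprocessing pipeline.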
def learn(self):
self.log.info('\\n---------- SurvivalAnalysis learner has started ----------')
self.log.info('\\n---------- SurvivalAnalysis learner method is "%s" ----------' % self.method)
if self.method.lower() in ['kaplanmeierfitter', 'kaplanmeier', 'kaplan-meier', 'kaplan meier', 'kaplan', 'km',
'kmf']:
self.log.info('\\n---------- SurvivalAnalysis learner method "%s" has started ----------' % self.method)
kmf = KaplanMeierFitter()
T = self.df[self.duration_column]
E = self.df[self.event_column]
self.log.info('\\n T : \\n%s' % str(T))
self.log.info('\\n E : \\n%s' % str(E))
K = kmf.fit(T, E)
kmf_sf = K.survival_function_
kmf_sf_json = self.survival_probability_to_json(kmf_sf)
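# survival_function_ holds the Kaplan-Meier estimate of P(T > t) at each observed duration; it is
# serialized to JSON for the UI.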
self.models.append(K)
if isinstance(self.filterExpression, str):
df_f, df_n, refined_filter_expression = self.parse_filterExpression()
kmf1 = KaplanMeierFitter()
kmf2 = KaplanMeierFitter()
self.log.info(
'\\n---------- SurvivalAnalysis learner "%s" fitting for filter expression has started----------' % self.method)
T1 = df_f[self.duration_column]
E1 = df_f[self.event_column]
T2 = df_n[self.duration_column]
E2 = df_n[self.event_column]
kmf1.fit(T1, E1)
fig, ax = plt.subplots(1, 1)
ax = kmf1.plot_survival_function(ax=ax, label='%s' % refined_filter_expression)
self.log.info(
'\\n---------- SurvivalAnalysis learner "%s" fitting for filter expression has ended----------' % self.method)
plt.title("KM Survival Functions - Filter vs Negation")
self.log.info(
'\\n---------- SurvivalAnalysis learner "%s" fitting for negation has started----------' % self.method)
kmf2.fit(T2, E2)
ax = kmf2.plot_survival_function(ax=ax, label='~%s' % refined_filter_expression)
self.log.info(
'\\n---------- SurvivalAnalysis learner "%s" fitting for negation has ended----------' % self.method)
self.models.extend([kmf1, kmf2])
kmf1_sf = kmf1.survival_function_
kmf2_sf = kmf2.survival_function_
kmf1_sf_json = self.survival_probability_to_json(kmf1_sf)
self.plots.append(fig)
self.log.info('\\n---------- SurvivalAnalysis learner method "%s" has ended ----------' % self.method)
self.log.info('\\n---------- SurvivalAnalysis learner has ended ----------')
self.log.info('Status:- |... Algorithm applied: KaplanMeierFitter')
return kmf1_sf_json
else:
fig, ax = plt.subplots(1, 1)
ax = kmf_sf.plot(ax=ax)
plt.title("KM Survival Functions")
self.plots.append(fig)
self.log.info('\\n---------- SurvivalAnalysis learner method "%s" has ended ----------' % self.method)
self.log.info('\\n---------- SurvivalAnalysis learner has ended ----------')
self.log.info('Status:- |... Algorithm applied: KaplanMeierFitter')
return kmf_sf_json
elif self.method.lower() in ['coxphfitter', 'coxregression', 'cox-regression', 'cox regression',
'coxproportionalhazard', 'coxph', 'cox', 'cph']:
self.log.info('\\n---------- SurvivalAnalysis learner method "%s" has started ----------' % self.method)
cph = CoxPHFitter(penalizer=0.1)
self.df = self.drop_constant_features(self.df)
C = cph.fit(self.df, self.duration_column, self.event_column)
self.models.append(C)
cph_sf = C.baseline_survival_
self.score = C.score(self.df, scoring_method="concordance_index")
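# Model quality is reported as the concordance index (C-index) of the fitted Cox model on the training data.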
self.log.info(
'\\n---------- SurvivalAnalysis learner "%s" score is "%s"----------' % (self.method, str(self.score)))
cph_sf_json = self.survival_probability_to_json(cph_sf)
if isinstance(self.covariateExpression, str):
covariate, covariate_inputs, covariate_values = self.parse_covariateExpression()
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.tight_layout()
ax1 = C.plot(ax=ax1, hazard_ratios=True)
self.log.info('\\n Summary : \\n%s' % str(C.summary))
ax1.set_title("COX hazard ratio")
ax2 = C.plot_partial_effects_on_outcome(covariate, covariate_values, ax=ax2)
mylabels = [covariate + '=' + str(x) for x in covariate_inputs]
mylabels.append('baseline')
ax2.legend(labels=mylabels)
ax2.set_title("Covariate Plot")
self.plots.append(fig)
else:
fig = plt.figure()
ax1 = C.plot(hazard_ratios=True)
self.log.info('\\n Summary : \\n%s' % str(C.summary))
plt.title("COX hazard ratio")
self.plots.append(fig)
self.log.info('\\n---------- SurvivalAnalysis learner method "%s" has ended ----------' % self.method)
self.log.info('\\n---------- SurvivalAnalysis learner has ended ----------')
self.log.info('Status:- |... Algorithm applied: CoxPHFitter')
return cph_sf_json
def parse_filterExpression(self):
import operator
self.log.info('\\n---------- Filter Expression parsing has started ----------')
self.log.info('Filter Expression provided : %s' % self.filterExpression)
self.log.info('Shape before filter : %s' % str(self.df.shape))
f = self.filterExpression.split('&')
f = list(filter(None, f))
if len(f) == 1:
p = '[<>=!]=?'
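# The regex captures a single comparison operator: <, >, <=, >=, == or !=.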
op = re.findall(p, self.filterExpression)[0]
covariate, covariate_input = [x.strip().strip('\\'').strip('\\"') for x in self.filterExpression.split(op)]
refined_filter_expression = covariate + op + covariate_input
self.log.info('Final refined filter : %s' % refined_filter_expression)
ops = {"==": operator.eq, ">": operator.gt, "<": operator.lt, ">=": operator.ge, "<=": operator.le,
"!=": operator.ne}
try:
fv = self.transform_filter_expression(covariate, covariate_input)
df_f = self.df[ops[op](self.df[covariate], fv)]
self.log.info('Shape after filter : %s' % str(df_f.shape))
df_n = self.df[~self.df[covariate].isin(df_f[covariate])]
self.log.info('Shape of negation : %s' % str(df_n.shape))
self.log.info('---------- Filter Expression has ended ----------')
return df_f, df_n, refined_filter_expression
except Exception:
self.log.info('\\n-----> Filter Expression parsing encountered error!!!')
exc_type, exc_obj, exc_tb = sys.exc_info()
if exc_type in (IndexError, ValueError, KeyError):
self.log.info('----->Given filter expression '+ self.filterExpression +' is invalid')
self.log.info('Valid examples are "A>100", "B==category1", "C>=10 && C<=20" etc..')
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname) + ' ' + str(exc_tb.tb_lineno))
raise Exception(str(exc_type)+str(exc_obj))
else:
full_f = []
try:
for filterExpression in f:
p = '[<>=!]=?'
op = re.findall(p, filterExpression)[0]
covariate, covariate_input = [x.strip().strip('\\'').strip('\\"') for x in filterExpression.split(op)]
full_f.append(covariate + op + covariate_input)
ops = {"==": operator.eq, ">": operator.gt, "<": operator.lt, ">=": operator.ge, "<=": operator.le,
"!=": operator.ne}
fv = self.transform_filter_expression(covariate, covariate_input)
df_f = self.df[ops[op](self.df[covariate], fv)]
df_n = self.df[~self.df[covariate].isin(df_f[covariate])]
refined_filter_expression = " & ".join(full_f)
self.log.info('Final refined filter : %s' % refined_filter_expression)
self.log.info('Shape after filter : %s' % str(df_f.shape))
self.log.info('Shape of negation : %s' % str(df_n.shape))
self.log.info('---------- Filter Expression has ended ----------')
return df_f, df_n, refined_filter_expression
# except (IndexError, ValueError, KeyError):
except Exception:
self.log.info('\\n-----> Filter Expression parsing encountered error!!!')
exc_type, exc_obj, exc_tb = sys.exc_info()
if exc_type in (IndexError, ValueError, KeyError):
self.log.info('----->Given filter expression '+ self.filterExpression +' is invalid')
self.log.info('Valid examples are "A>100", "B==category1", "C>=10 && C<=20" etc..')
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname) + ' ' + str(exc_tb.tb_lineno))
raise Exception(str(exc_type)+str(exc_obj))
def parse_covariateExpression(self):
self.log.info('\\n---------- Covariate Expression parsing has started ----------')
self.log.info('\\n Covariate Expression provided : %s' % self.covariateExpression)
import ast
p = '[=:]'
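# Covariate expressions are expected in the form <feature>=[v1, v2, ...] (or with ':'); the list
# literal is parsed with ast.literal_eval and each value is transformed like a filter value.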
try:
op = re.findall(p, self.covariateExpression)[0]
covariate, covariate_inputs = [x.strip().strip('\\'').strip('\\"') for x in
self.covariateExpression.split(op)]
covariate_inputs = ast.literal_eval(covariate_inputs)
covariate_values = [self.transform_filter_expression(covariate, x) for x in covariate_inputs]
self.log.info('\\n---------- Covariate Expression parsing has ended ----------')
return covariate, covariate_inputs, covariate_values
except Exception:
self.log.info('\\n-----> Covariate Expression parsing encountered error!!!')
exc_type, exc_obj, exc_tb = sys.exc_info()
if exc_type in (IndexError, ValueError, KeyError):
self.log.info('----->Given covariate expression '+ self.filterExpression +' is invalid')
self.log.info("\\n Valid examples are A=['Yes','No'] or B=[100,500,1000]")
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname) + ' ' + str(exc_tb.tb_lineno))
raise Exception(str(exc_type)+str(exc_obj))
def survival_probability_to_json(self, sf):
'''
sf = Survival function i.e. KaplanMeierFitter.survival_function_ or CoxPHFitter.baseline_survival_
returns json of survival probabilities
'''
sf = sf[sf.columns[0]].apply(lambda x: "%4.2f" % (x * 100))
self.log.info('\\n Survival probabilities : \\n%s' % str(sf))
sf = sf.reset_index()
sf = sf.sort_values(sf.columns[0])
sf_json = sf.to_json(orient='records')
self.log.info('\\n Survival probability json : \\n%s' % str(sf_json))
return sf_json
def drop_constant_features(self, df):
dropped = []
for col in df.columns:
if (len(df[col].unique()) == 1) and (col not in [self.duration_column, self.event_column]):
df.drop(col, inplace=True, axis=1)
dropped.append(col)
if len(dropped) != 0:
self.log.info('\\n Dropping constant features %s' % str(dropped))
self.log.info('\\n After dropping constant features : \\n%s' % str(df))
return df
def predict(self):
if self.method == 'KaplanMeierFitter':
return self.model.predict(self.test[self.duration_column])
elif self.method == 'CoxPHFitter':
res = []
for idx, row in self.test.iterrows():
res.append(
self.model.predict_survival_function(self.test, times=row[self.model.duration_col])[idx].values[0])
return pd.DataFrame(res)
<s><s> import os
import traceback
import sys
print("before function process")
def process(version):
print("inside fun process")
currentDirectory = os.path.dirname(os.path.abspath(__file__))
print(currentDirectory)
try:
from os.path import expanduser
import platform
import subprocess
import sys
import demoji
try:
print('Downloading NLTK additional packages...')
import nltk
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
except Exception as e:
print('NLTK Error: '+str(e))
pass
from appbe.dataPath import DATA_DIR
import shutil
import importlib
license_path = DATA_DIR
if os.path.isdir(license_path) == False:
os.makedirs(license_path)
import warnings
warnings.filterwarnings("ignore")
LicenseFolder = os.path.join(license_path,'License')
if os.path.isdir(LicenseFolder) == False:
os.makedirs(LicenseFolder)
sqlite_path = os.path.join(license_path,'sqlite')
if os.path.isdir(sqlite_path) == False:
os.makedirs(sqlite_path)
pretrainedModel_path = os.path.join(license_path,'PreTrainedModels')
if os.path.isdir(pretrainedModel_path) == False:
os.makedirs(pretrainedModel_path)
config_path = os.path.join(license_path,'config')
if os.path.isdir(config_path) == False:
os.makedirs(config_path)
target_path = os.path.join(license_path,'target')
if os.path.isdir(target_path) == False:
os.makedirs(target_path)
data_path = os.path.join(license_path,'storage')
if os.path.isdir(data_path) == False:
os.makedirs(data_path)
log_path = os.path.join(license_path,'logs')
if os.path.isdir(log_path) == False:
os.makedirs(log_path)
configFolder = os.path.join(currentDirectory,'..','config')
for file in os.listdir(configFolder):
if file.endswith(".var"):
os.remove(os.path.join(configFolder,file))
versionfile = os.path.join(configFolder,str(version)+'.var')
with open(versionfile, 'w') as fp:
pass
manage_path = os.path.join(currentDirectory,'..','aion.py')
print('Setting up Django Environment for AION User Interface')
proc = subprocess.Popen([sys.executable, manage_path, "-m","migrateappfe"],stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
if proc.returncode != 0:
err_string = stderr.decode('utf8')
import re
result = re.search("No module named '(.*)'", err_string)
if 'ModuleNotFoundError' in err_string:
print('\\n"{}" module is missing. The dependencies of AION were not installed properly. Uninstall and reinstall AION'.format(result.group(1)))
else:
print('\\nThe dependencies of AION were not installed properly. Uninstall and reinstall AION')
raise Exception(err_string)
else:
print('AION User Interface successfully set')
print('--------------AION Installed Successfully--------------')
except Exception as e:
print(e)
f = open(os.path.join(currentDirectory, 'workspace_error_logs.txt'), "w")
f.write(str(traceback.format_exc()))
f.close()
pass
if __name__ == "__main__":
process(sys.argv[1])<s> import os
import traceback
def process(version):
currentDirectory = os.path.dirname(os.path.abspath(__file__))
try:
import win32com.client
from os.path import expanduser
import platform
import subprocess
import sys
import demoji
try:
print('Downloading NLTK additional packages...')
import nltk
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
except Exception as e:
print('NLTK Error: '+str(e))
pass
from appbe.dataPath import DATA_DIR
from win32com.shell import shell, shellcon
import shutil
import importlib
license_path = DATA_DIR
if os.path.isdir(license_path) == False:
os.makedirs(license_path)
import warnings
warnings.filterwarnings("ignore")
LicenseFolder = os.path.join(license_path,'License')
if os.path.isdir(LicenseFolder) == False:
os.makedirs(LicenseFolder)
sqlite_path = os.path.join(license_path,'sqlite')
if os.path.isdir(sqlite_path) == False:
os.makedirs(sqlite_path)
pretrainedModel_path = os.path.join(license_path,'PreTrainedModels')
if os.path.isdir(pretrainedModel_path) == False:
os.makedirs(pretrainedModel_path)
config_path = os.path.join(license_path,'config')
if os.path.isdir(config_path) == False:
os.makedirs(config_path)
target_path = os.path.join(license_path,'target')
if os.path.isdir(target_path) == False:
os.makedirs(target_path)
data_path = os.path.join(license_path,'storage')
if os.path.isdir(data_path) == False:
os.makedirs(data_path)
log_path = os.path.join(license_path,'logs')
if os.path.isdir(log_path) == False:
os.makedirs(log_path)
configFolder = os.path.join(currentDirectory,'..','config')
for file in os.listdir(configFolder):
if file.endswith(".var"):
os.remove(os.path.join(configFolder,file))
versionfile = os.path.join(configFolder,str(version)+'.var')
with open(versionfile, 'w') as fp:
pass
manage_path = os.path.join(currentDirectory,'..','aion.py')
print('Setting up Django Environment for AION User Interface')
proc = subprocess.Popen([sys.executable, manage_path, "-m","migrateappfe"],stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
if proc.returncode != 0:
err_string = stderr.decode('utf8')
import re
result = re.search("No module named '(.*)'", err_string)
if 'ModuleNotFoundError' in err_string:
print('\\n"{}" module is missing. The dependencies of AION were not installed properly. Uninstall and reinstall AION'.format(result.group(1)))
else:
print('\\nThe dependencies of AION were not installed properly. Uninstall and reinstall AION')
raise Exception(err_string)
else:
print('AION User Interface successfully set')
desktop = shell.SHGetFolderPath (0, shellcon.CSIDL_DESKTOP, 0, 0)
#desktop = os.path.expanduser('~/Desktop')
path = os.path.join(desktop, 'Explorer {0}.lnk'.format(version))
target = os.path.normpath(os.path.join(currentDirectory,'..', 'sbin', 'AION_Explorer.bat'))
icon = os.path.join(currentDirectory,'icons','aion.ico')
shell = win32com.client.Dispatch("WScript.Shell")
shortcut = shell.CreateShortCut(path)
shortcut.Targetpath = '"'+target+'"'
shortcut.WorkingDirectory = currentDirectory
#shortcut.WorkingDirectory = os.path.dirname(__file__)
shortcut.IconLocation = icon
shortcut.WindowStyle = 1 # 7 - Minimized, 3 - Maximized, 1 - Normal
shortcut.save()
path = os.path.join(desktop, 'Shell {0}.lnk'.format(version))
target = os.path.normpath(os.path.join(currentDirectory,'..','sbin', 'AION_Shell.bat'))
icon = os.path.join(currentDirectory,'icons','aion_shell.ico')
shell = win32com.client.Dispatch("WScript.Shell")
shortcut = shell.CreateShortCut(path)
shortcut.Targetpath = '"'+target+'"'
shortcut.WorkingDirectory = currentDirectory
#shortcut.WorkingDirectory = os.path.dirname(__file__)
shortcut.IconLocation = icon
shortcut.WindowStyle = 1 # 7 - Minimized, 3 - Maximized, 1 - Normal
shortcut.save()
print('--------------AION Installed Successfully--------------')
except Exception as e:
print(e)
f = open(os.path.join(currentDirectory, 'workspace_error_logs.txt'), "w")
f.write(str(traceback.format_exc()))
f.close()
pass
<s><s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> import warnings
import sys
warnings.simplefilter(action='ignore', category=FutureWarning)
import xgboost as xgb
import dask.array as da
import shutil
import dask.distributed
import dask.dataframe as dd
import dask_ml
import logging
from sklearn.metrics import accuracy_score, recall_score, \\
roc_auc_score, precision_score, f1_score, \\
mean_squared_error, mean_absolute_error, \\
r2_score, classification_report, confusion_matrix, \\
mean_absolute_percentage_error
import lightgbm as lgb
import re
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator, TransformerMixin
from dask_ml.impute import SimpleImputer
from dask_ml.compose import ColumnTransformer
from dask_ml.decomposition import TruncatedSVD, PCA
from dask_ml.preprocessing import StandardScaler, \\
MinMaxScaler, \\
OneHotEncoder, LabelEncoder
from dask_ml.wrappers import ParallelPostFit
import numpy as np
import json
import time
from sklearn.ensemble import IsolationForest
import joblib
import pickle as pkl
import os
predict_config={}
dask.config.set({"distributed.workers.memory.terminate": 0.99})
dask.config.set({"array.chunk-size": "128 MiB"})
dask.config.set({"distributed.admin.tick.limit": "3h"})
# dask.config.set({"distributed.workers.memory.pause": 0.9})
class MinImputer(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# to_fillna = ['public_meeting', 'scheme_management', 'permit']
# X[to_fillna] = X[to_fillna].fillna(value='NaN')
# X[to_fillna] = X[to_fillna].astype(str)
X = X.fillna(value=X.min())
# X = X.astype(str)
return X
class MaxImputer(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
X = X.fillna(value=X.max())
return X
class DropImputer(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
X = X.dropna()
return X
class ModeCategoricalImputer(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
X = X.fillna(value=X.mode())
return X
class IsoForestOutlierExtractor(TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X, y):
lcf = IsolationForest()
with joblib.parallel_backend('dask'):
lcf.fit(X)
y_pred_train = lcf.predict(X)
y_pred_train = y_pred_train == 1
return X
def load_config_json(json_file):
with open(json_file, 'r') as j:
contents = json.loads(j.read())
return contents
def load_data_dask(data_file, npartitions=500):
big_df = dd.read_csv(data_file, # sep=r'\\s*,\\s*',
assume_missing=True,
parse_dates=True, infer_datetime_format=True,
sample=1000000,
# dtype={'caliper': 'object',
# 'timestamp': 'object'},
# dtype='object',
na_values=['-','?']
)
big_df = big_df.repartition(npartitions)
return big_df
def get_dask_eda(df_dask):
descr = df_dask.describe().compute()
corr = df_dask.corr().compute()
return descr, corr
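# Illustrative usage of the two helpers above (a sketch, not part of the training flow;
# the CSV path and partition count are hypothetical):
#   df = load_data_dask('/path/to/train.csv', npartitions=8)
#   descr, corr = get_dask_eda(df)   # both describe() and corr() are computed eagerly here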
def normalization(config):
scaler = config["advance"] \\
["profiler"]["normalization"]
scaler_method = None
if scaler["minMax"] == "True":
scaler_method = MinMaxScaler()
if scaler["standardScaler"] == "True":
scaler_method = StandardScaler()
return scaler_method
def categorical_encoding(config):
encoder = config["advance"]["profiler"] \\
["categoryEncoding"]
encoder_method = None
if encoder["OneHotEncoding"] == "True":
encoder_method = OneHotEncoder()
# OneHotEncoder(handle_unknown='ignore', sparse=False)
if encoder["LabelEncoding"] == "True":
encoder_method = LabelEncoder()
return encoder_method
def numeric_feature_imputing(config):
imputer_numeric_method = None
imputer_numeric = config["advance"] \\
["profiler"]["numericalFillMethod"]
if imputer_numeric["Median"] == "True":
print("Median Simple Imputer")
imputer_numeric_method = SimpleImputer(strategy='median')
if imputer_numeric["Mean"] == "True":
print("Mean Simple Imputer")
imputer_numeric_method = SimpleImputer(strategy='mean')
if imputer_numeric["Min"] == "True":
print("Min Simple Imputer")
imputer_numeric_method = MinImputer()
if imputer_numeric["Max"] == "True":
print("Max Simple Imputer")
imputer_numeric_method = MaxImputer()
if imputer_numeric["Zero"] == "True":
print("Zero Simple Imputer")
imputer_numeric_method = SimpleImputer(strategy='constant',
fill_value=0)
# if imputer_numeric["Drop"] == "True":
# print("Median Simple Imputer")
# imputer_numeric_method = DropImputer()
return imputer_numeric_method
def categorical_feature_imputing(config):
imputer_categorical_method = None
imputer_categorical = config["advance"] \\
["profiler"]["categoricalFillMethod"]
if imputer_categorical["MostFrequent"] == "True":
imputer_categorical_method = SimpleImputer(strategy='most_frequent')
if imputer_categorical["Mode"] == "True":
imputer_categorical_method = ModeCategoricalImputer()
if imputer_categorical["Zero"] == "True":
imputer_categorical_method = SimpleImputer(strategy='constant',
fill_value=0)
return imputer_categorical_method
def preprocessing_pipeline(config, X_train):
print("Start preprocessing")
scaler_method = normalization(config)
encoding_method = categorical_encoding(config)
imputer_numeric_method = numeric_feature_imputing(config)
imputer_categorical_method = categorical_feature_imputing(config)
numeric_pipeline = Pipeline(steps=[
('impute', imputer_numeric_method),
('scale', scaler_method)
])
categorical_pipeline = Pipeline(steps=[
('impute', imputer_categorical_method),
('encoding', encoding_method)
])
numerical_features = X_train._get_numeric_data().columns.values.tolist()
categorical_features = list(set(X_train.columns) - set(X_train._get_numeric_data().columns))
print("numerical_features: ", numerical_features)
print("categorical_features: ", categorical_features)
full_processor = ColumnTransformer(transformers=[
('number', numeric_pipeline, numerical_features),
# ('category', categorical_pipeline, categorical_features)
])
return full_processor
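# Shape of the "advance.profiler" config section read by the helpers above -- a sketch inferred
# from the keys they access; real AION configs may carry additional fields. Note the flags are
# the strings "True"/"False", not booleans.
#   "advance": {
#       "profiler": {
#           "normalization":         {"minMax": "False", "standardScaler": "True"},
#           "categoryEncoding":      {"OneHotEncoding": "True", "LabelEncoding": "False"},
#           "numericalFillMethod":   {"Median": "True", "Mean": "False", "Min": "False", "Max": "False", "Zero": "False"},
#           "categoricalFillMethod": {"MostFrequent": "True", "Mode": "False", "Zero": "False"}
#       }
#   }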
def full_pipeline(X_train, X_test, config):
full_processor = preprocessing_pipeline(config, X_train)
reduce_dim = config["advance"] \\
["selector"]["featureEngineering"]
feature_reduce = None
if reduce_dim["SVD"] == "True":
feature_reduce = TruncatedSVD(n_components=3)
if reduce_dim["PCA"] == "True":
feature_reduce = PCA(n_components=3)
X_train = full_processor.fit_transform(X_train)
# joblib.dump(full_processor, 'full_processor_pipeline.pkl')
deploy_location = config["basic"]["modelLocation"]
profiler_file = os.path.join(deploy_location,'model','profiler.pkl')
selector_file = os.path.join(deploy_location,'model','selector.pkl')
save_pkl(full_processor, profiler_file)
X_test = full_processor.transform(X_test)
predict_config['profilerLocation'] = 'profiler.pkl'
if feature_reduce != None:
X_train = feature_reduce.fit_transform(X_train.to_dask_array(lengths=True))
save_pkl(feature_reduce, selector_file)
predict_config['selectorLocation'] = 'selector.pkl'
# joblib.dump(feature_reduce, 'feature_reduce_pipeline.pkl')
X_test = feature_reduce.transform(X_test.to_dask_array(lengths=True))
X_train = dd.from_dask_array(X_train)
X_test = dd.from_dask_array(X_test)
else:
predict_config['selectorLocation'] = ''
return X_train, X_test
def train_xgb_classification(client, X_train, y_train, X_test, config):
print("Training XGBoost classification")
model_hyperparams = config["advance"] \\
["distributedlearner_config"] \\
["modelParams"] \\
["classifierModelParams"] \\
["Distributed Extreme Gradient Boosting (XGBoost)"]
dask_model = xgb.dask.DaskXGBClassifier(
tree_method=model_hyperparams["tree_method"],
n_estimators=int(model_hyperparams["n_estimators"]),
max_depth=int(model_hyperparams["max_depth"]),
gamma=float(model_hyperparams["gamma"]),
min_child_weight=float(model_hyperparams["min_child_weight"]),
subsample=float(model_hyperparams["subsample"]),
colsample_bytree=float(model_hyperparams["colsample_bytree"]),
learning_rate=float(model_hyperparams["learning_rate"]),
reg_alpha=float(model_hyperparams["reg_alpha"]),
reg_lambda=float(model_hyperparams["reg_lambda"]),
random_state=int(model_hyperparams["random_state"]),
verbosity=3)
dask_model.client = client
X_train, X_test = full_pipeline(X_train, X_test, config)
dask_model.fit(X_train, y_train)
save_model(config, dask_model)
save_config(config)
return dask_model, X_train, X_test
def train_xgb_regression(client, X_train, y_train, X_test, config):
model_hyperparams = config["advance"] \\
["distributedlearner_config"] \\
["modelParams"] \\
["regressorModelParams"] \\
["Distributed Extreme Gradient Boosting (XGBoost)"]
print("Training XGBoost regression")
dask_model = xgb.dask.DaskXGBRegressor(
tree_method=model_hyperparams["tree_method"],
n_estimators=int(model_hyperparams["n_estimators"]),
max_depth=int(model_hyperparams["max_depth"]),
gamma=float(model_hyperparams["gamma"]),
min_child_weight=float(model_hyperparams["min_child_weight"]),
subsample=float(model_hyperparams["subsample"]),
colsample_bytree=float(model_hyperparams["colsample_bytree"]),
learning_rate=float(model_hyperparams["learning_rate"]),
reg_alpha=float(model_hyperparams["reg_alpha"]),
reg_lambda=float(model_hyperparams["reg_lambda"]),
random_state=int(model_hyperparams["random_state"]),
verbosity=3)
dask_model.client = client
X_train, X_test = full_pipeline(X_train, X_test, config)
dask_model.fit(X_train, y_train)
# dask_model.fit(X_train, y_train, eval_set=[(X_test, y_test)])
save_model(config, dask_model)
save_config(config)
return dask_model, X_train, X_test
def train_lgbm_regression(client, X_train, y_train, X_test, config):
print("Training lightGBM regression")
model_hyperparams = config["advance"] \\
["distributedlearner_config"] \\
["modelParams"] \\
["regressorModelParams"] \\
["Distributed Light Gradient Boosting (LightGBM)"]
dask_model = lgb.DaskLGBMRegressor(
client=client,
n_estimators=int(model_hyperparams["n_estimators"]),
num_leaves=int(model_hyperparams["num_leaves"]),
max_depth =int(model_hyperparams["max_depth"]),
learning_rate=float(model_hyperparams["learning_rate"]),
min_child_samples=int(model_hyperparams["min_child_samples"]),
reg_alpha=int(model_hyperparams["reg_alpha"]),
subsample=float(model_hyperparams["subsample"]),
reg_lambda=int(model_hyperparams["reg_lambda"]),
colsample_bytree=float(model_hyperparams["colsample_bytree"]),
n_jobs=4,
verbosity=3)
X_train, X_test = full_pipeline(X_train, X_test, config)
# print("before X_train.shape, y_train.shape",
# X_train.shape,
# y_train.shape)
# indices = dask_findiforestOutlier(X_train)
# print("X_train type: ", type(X_train))
# print("y_train type: ", type(y_train))
# X_train, y_train = X_train.iloc[indices, :], \\
# y_train.iloc[indices]
# print("after X_train.shape, y_train.shape",
# X_train.shape,
# y_train.shape)
dask_model.fit(X_train, y_train)
# dask_model.fit(X_train, y_train,
# # eval_set=[(X_test,y_test),
# # (X_train,y_train)],
# verbose=20,eval_metric='l2')
save_model(config, dask_model)
save_config(config)
return dask_model, X_train, X_test
def train_lgbm_classification(client, X_train, y_train, X_test, config):
print("Training lightGBM classification")
model_hyperparams = config["advance"] \\
["distributedlearner_config"] \\
["modelParams"] \\
["classifierModelParams"] \\
["Distributed Light Gradient Boosting (LightGBM)"]
dask_model = lgb.DaskLGBMClassifier(
client=client,
num_leaves=int(model_hyperparams["num_leaves"]),
learning_rate=float(model_hyperparams["learning_rate"]),
feature_fraction=float(model_hyperparams["feature_fraction"]),
bagging_fraction=float(model_hyperparams["bagging_fraction"]),
bagging_freq=int(model_hyperparams["bagging_freq"]),
max_depth=int(model_hyperparams["max_depth"]),
min_data_in_leaf=int(model_hyperparams["min_data_in_leaf"]),
n_estimators=int(model_hyperparams["n_estimators"]),
verbosity=3)
X_train, X_test = full_pipeline(X_train, X_test, config)
dask_model.fit(X_train, y_train)
# dask_model.fit(X_train, y_train,
# eval_set=[(X_test,y_test),
# (X_train,y_train)],
# verbose=20,eval_metric='logloss')
save_model(config, dask_model)
save_config(config)
return dask_model, X_train, X_test
def evaluate_model_classification(model, config, X_test, y_test, class_names):
metrics = config["basic"]["scoringCriteria"]["classification"]
y_test = y_test.to_dask_array().compute()
log = logging.getLogger('eion')
X_test = X_test.to_dask_array(lengths=True)
y_pred = model.predict(X_test)
if metrics["Accuracy"] == "True":
# ParallelPostFit(estimator=model, scoring='accuracy')
# score = model.score(X_test, y_test) * 100.0
score = accuracy_score(y_test, y_pred) * 100.0
type = 'Accuracy'
log.info('Status:-|... Accuracy Score '+str(score))
if metrics["Recall"] == "True":
score = recall_score(y_test, y_pred)
type = 'Recall'
log.info('Status:-|... Recall Score '+str(score))
if metrics["Precision"] == "True":
score = precision_score(y_test, y_pred)
type = 'Precision'
log.info('Status:-|... Precision Score '+str(score))
if metrics["F1_Score"] == "True":
score = f1_score(y_test, y_pred)
type = 'F1'
log.info('Status:-|... F1 Score '+str(score))
y_pred_prob = model.predict_proba(X_test)
if len(class_names) == 2:
roc_auc = roc_auc_score(y_test, y_pred)
else:
roc_auc = roc_auc_score(y_test, y_pred_prob, multi_class='ovr')
if metrics["ROC_AUC"] == "True":
score = roc_auc
type = 'ROC_AUC'
log.info('Status:-|... ROC AUC Score '+str(score))
class_report = classification_report(y_test, y_pred, output_dict=True, target_names=class_names)
conf_matrix = confusion_matrix(y_test, y_pred)
return type, score, class_report, conf_matrix, roc_auc
def evaluate_model_regression(model, config, X_test, y_test):
metrics = config["basic"]["scoringCriteria"]["regression"]
y_pred = model.predict(X_test).compute()
y_test = y_test.to_dask_array().compute()
X_test = X_test.to_dask_array(lengths=True)
log = logging.getLogger('eion')
mse = mean_squared_error(y_test, y_pred)
rmse = mean_squared_error(y_test, y_pred, squared=False)
norm_rmse = rmse * 100 / (y_test.max() - y_test.min())
mape = mean_absolute_percentage_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
mae = mean_absolute_error(y_test, y_pred)
if metrics["Mean Squared Error"] == "True":
type = 'Mean Squared Error'
score = mse
log.info('Status:-|... Mean Squared Error '+str(score))
if metrics["Root Mean Squared Error"] == "True":
type = 'Root Mean Squared Error'
score = rmse
log.info('Status:-|... Root Mean Square Error '+str(score))
if metrics["R-Squared"] == "True":
type = 'R-Squared'
score = r2
log.info('Status:-|... R Squared Error '+str(score))
if metrics["Mean Absolute Error"] == "True":
type = 'Mean Absolute Error'
score = mae
log.info('Status:-|... Mean Absolute Error '+str(score))
return type, score, mse, rmse, norm_rmse, r2, mae, mape
def save_config(config):
deploy_location = config["basic"]["modelLocation"]
saved_model_file = os.path.join(deploy_location,'etc','config.json')
print(predict_config)
with open (saved_model_file,'w') as f:
json.dump(predict_config, f)
f.close()
def save_model(config, model):
model_name = config["basic"]["modelName"]
model_version = config["basic"]["modelVersion"]
analysis_type = config["basic"]["analysisType"]
deploy_location = config["basic"]["modelLocation"]
if analysis_type["classification"] == "True":
problem_type = "classification"
if analysis_type["regression"] == "True":
problem_type = "regression"
print("model_name", model_name)
print("model_version", model_version)
print("problem_type", problem_type)
print("deploy_location", deploy_location)
file_name = problem_type + '_' + model_version + ".sav"
saved_model = os.path.join(deploy_location,'model',file_name)
print("Save trained model to directory: ", save_model)
with open (saved_model,'wb') as f:
pkl.dump(model,f)
f.close()
predict_config['modelLocation'] = file_name
def save_pkl(model, filename):
with open(filename, 'wb') as f:
pkl.dump(model, f,
protocol=pkl.HIGHEST_PROTOCOL)
def dask_findiforestOutlier(X):
print("Outlier removal with Isolation Forest...")
isolation_forest = IsolationForest(n_estimators=100)
with joblib.parallel_backend('dask'):
isolation_forest.fit(X)
y_pred_train = isolation_forest.fit_predict(X)
mask_isoForest = y_pred_train != -1
return mask_isoForest
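# Illustrative usage (a sketch; the call sites in the LightGBM trainer above are commented out):
#   mask = dask_findiforestOutlier(X_sample)              # boolean array, True for inliers
#   X_sample, y_sample = X_sample[mask], y_sample[mask]   # keep inliers of an in-memory sample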
def training(configFile):
start_time = time.time()
config = load_config_json(configFile)
data_dir = config["basic"]["dataLocation"]
n_workers = int(config["advance"]
["distributedlearner_config"]
["n_workers"])
npartitions = int(config["advance"]
["distributedlearner_config"]
["npartitions"])
threads_per_worker = int(config["advance"]
["distributedlearner_config"]
["threads_per_worker"])
predict_config['modelName'] = config["basic"]["modelName"]
predict_config['modelVersion'] = config["basic"]["modelVersion"]
predict_config['targetFeature'] = config["basic"]["targetFeature"]
predict_config['trainingFeatures'] = config["basic"]["trainingFeatures"]
predict_config['dataLocation'] = config["basic"]["dataLocation"]
predict_config['n_workers'] = n_workers
predict_config['npartitions'] = npartitions
predict_config['threads_per_worker'] = threads_per_worker
if config['basic']['analysisType']["classification"] == "True":
problemType = "classification"
oProblemType = "Distributed Classification"
if config['basic']['analysisType']["regression"] == "True":
problemType = "regression"
oProblemType = "Distributed Regression"
predict_config['analysisType'] = problemType
predict_config['scoringCriteria'] = ''
target_feature = config["basic"]["targetFeature"]
training_features = config["basic"]["trainingFeatures"]
deploy_location = config["basic"]["deployLocation"]
is_xgb_class = config["basic"] \\
["algorithms"]["classification"] \\
["Distributed Extreme Gradient Boosting (XGBoost)"]
is_lgbm_class = config["basic"] \\
["algorithms"]["classification"] \\
["Distributed Light Gradient Boosting (LightGBM)"]
is_xgb_regress = config["basic"] \\
["algorithms"]["regression"] \\
["Distributed Extreme Gradient Boosting (XGBoost)"]
is_lgbm_regress = config["basic"] \\
["algorithms"]["regression"] \\
["Distributed Light Gradient Boosting (LightGBM)"]
if is_xgb_class=="True" or is_xgb_regress=="True":
algorithm = "Distributed Extreme Gradient Boosting (XGBoost)"
predict_config['algorithm'] = algorithm
if is_lgbm_class=="True" or is_lgbm_regress=="True":
algorithm = "Distributed Light Gradient Boosting (LightGBM)"
predict_config['algorithm'] = algorithm
cluster = dask.distributed.LocalCluster(n_workers=n_workers,
threads_per_worker=threads_per_worker,
# dashboard_address="127.0.0.1:8787"
)
client = dask.distributed.Client(cluster)
df_dask = load_data_dask(data_dir, npartitions=npartitions)
deployFolder = config["basic"]["deployLocation"]
modelName = config["basic"]["modelName"]
modelName = modelName.replace(" ", "_")
modelVersion = config["basic"]["modelVersion"]
modelLocation = os.path.join(deployFolder,modelName)
os.makedirs(modelLocation,exist_ok = True)
deployLocation = os.path.join(modelLocation,modelVersion)
predict_config['deployLocation'] = deployLocation
try:
os.makedirs(deployLocation)
except OSError as e:
shutil.rmtree(deployLocation)
time.sleep(2)
os.makedirs(deployLocation)
modelFolderLocation = os.path.join(deployLocation,'model')
try:
os.makedirs(modelFolderLocation)
except OSError as e:
print("\\nModel Folder Already Exists")
etcFolderLocation = os.path.join(deployLocation,'etc')
try:
os.makedirs(etcFolderLocation)
except OSError as e:
print("\\ETC Folder Already Exists")
logFolderLocation = os.path.join(deployLocation,'log')
try:
os.makedirs(logFolderLocation)
except OSError as e:
print("\\nLog Folder Already Exists")
logFileName=os.path.join(logFolderLocation,'model_training_logs.log')
outputjsonFile=os.path.join(deployLocation,'etc','output.json')
filehandler = logging.FileHandler(logFileName, 'w','utf-8')
formatter = logging.Formatter('%(message)s')
filehandler.setFormatter(formatter)
log = logging.getLogger('eion')
log.propagate = False
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
log.removeHandler(hdlr)
log.addHandler(filehandler)
log.setLevel(logging.INFO)
log.info('Status:-|... Distributed Learning Started')
config['basic']['modelLocation'] = deployLocation
# Get input for EDA
# descr, corr = get_dask_eda(df_dask=df_dask)
#print(descr)
# print(corr)
#print(df_dask.columns)
#print("target feature", target_feature)
df_dask = df_dask.dropna(subset=[target_feature])
if is_xgb_class == "True" or is_lgbm_class == "True":
df_dask = df_dask.categorize(columns=[target_feature])
df_dask[target_feature] = df_dask[target_feature].astype('category')
df_dask[target_feature] = df_dask[target_feature].cat.as_known()
label_mapping = dict(enumerate(df_dask[target_feature].cat.categories))
df_dask[target_feature] = df_dask[target_feature].cat.codes
label_mapping_file =os.path.join(deployLocation,'etc','label_mapping.json')
with open(label_mapping_file, 'w') as f:
json.dump(label_mapping, f)
if config["advance"]["profiler"]["removeDuplicate"] == "True":
df_dask = df_dask.drop_duplicates()
# Need to dropna for case of categoricalFillMethod
# if config["advance"]["profiler"]["numericalFillMethod"]["Drop"] == "True":
# df_dask = df_dask.dropna()
trainingFeatures = config["basic"]["trainingFeatures"].split(',')
if target_feature not in trainingFeatures:
trainingFeatures.append(target_feature)
df_dask = df_dask[trainingFeatures]
y = df_dask[target_feature]
X = df_dask.drop(target_feature, axis=1)
print("after X.shape, y.shape", X.shape, y.shape)
X_train, X_test, y_train, y_test = dask_ml.model_selection.train_test_split(X, y,
test_size=0.2, random_state=0)
trainingFeatures = config["basic"]["trainingFeatures"].split(',')
outputJson = None
conf_matrix_dict = {}
train_conf_matrix_dict = {}
try:
if is_xgb_class == "True":
modelName = 'Distributed Extreme Gradient Boosting (XGBoost)'
dask_model, X_train, X_test = train_xgb_classification(client, X_train, y_train, X_test, config)
class_names = list(label_mapping.values())
_, _, train_class_report, train_conf_matrix, train_roc_auc = evaluate_model_classification(dask_model, config,
X_train, y_train, class_names)
scoringCreteria,score, class_report, conf_matrix, roc_auc = evaluate_model_classification(dask_model, config,
X_test, y_test, class_names)
for i in range(len(conf_matrix)):
conf_matrix_dict_1 = {}
for j in range(len(conf_matrix[i])):
conf_matrix_dict_1['pre:' + str(class_names[j])] = int(conf_matrix[i][j])
conf_matrix_dict['act:'+ str(class_names[i])] = conf_matrix_dict_1
for i in range(len(train_conf_matrix)):
train_conf_matrix_dict_1 = {}
for j in range(len(train_conf_matrix[i])):
train_conf_matrix_dict_1['pre:' + str(class_names[j])] = int(train_conf_matrix[i][j])
train_conf_matrix_dict['act:'+ str(class_names[i])] = train_conf_matrix_dict_1
# print(roc_auc)
outputJson = {'status':'SUCCESS','data':{'ModelType':oProblemType,\\
'deployLocation':deployLocation,'BestModel':modelName,'BestScore':score,'ScoreType':scoringCreteria,\\
'matrix':{'ConfusionMatrix':conf_matrix_dict,'ClassificationReport':class_report,'ROC_AUC_SCORE':roc_auc},\\
'trainmatrix':{'ConfusionMatrix':train_conf_matrix_dict,'ClassificationReport':train_class_report,'ROC_AUC_SCORE':train_roc_auc},\\
'featuresused':trainingFeatures,'targetFeature':target_feature,'EvaluatedModels':[{'Model':modelName,'Score':score}],
'LogFile':logFileName}}
if is_lgbm_class == "True":
modelName = 'Distributed Light Gradient Boosting (LightGBM)'
dask_model, X_train, X_test = train_lgbm_classification(client, X_train, y_train, X_test, config)
class_names = list(label_mapping.values())
_, _, train_class_report, train_conf_matrix, train_roc_auc = evaluate_model_classification(dask_model, config,
X_train, y_train, class_names)
scoringCreteria,score, class_report, conf_matrix, roc_auc = evaluate_model_classification(dask_model, config,
X_test, y_test, class_names)
for i in range(len(conf_matrix)):
conf_matrix_dict_1 = {}
for j in range(len(conf_matrix[i])):
conf_matrix_dict_1['pre:' + str(class_names[j])] = int(conf_matrix[i][j])
conf_matrix_dict['act:'+ str(class_names[i])] = conf_matrix_dict_1
for i in range(len(train_conf_matrix)):
train_conf_matrix_dict_1 = {}
for j in range(len(train_conf_matrix[i])):
train_conf_matrix_dict_1['pre:' + str(class_names[j])] = int(train_conf_matrix[i][j])
train_conf_matrix_dict['act:'+ str(class_names[i])] = train_conf_matrix_dict_1
outputJson = {'status':'SUCCESS','data':{'ModelType':oProblemType,\\
'deployLocation':deployLocation,'BestModel':modelName,'BestScore':score,'ScoreType':scoringCreteria,\\
'matrix':{'ConfusionMatrix':conf_matrix_dict,'ClassificationReport':class_report,'ROC_AUC_SCORE':roc_auc},\\
'trainmatrix':{'ConfusionMatrix':train_conf_matrix_dict,'ClassificationReport':train_class_report,'ROC_AUC_SCORE':train_roc_auc},\\
'featuresused':trainingFeatures,'targetFeature':target_feature,'EvaluatedModels':[{'Model':modelName,'Score':score}],
'LogFile':logFileName}}
if is_xgb_regress == "True":
modelName = 'Distributed Extreme Gradient Boosting (XGBoost)'
dask_model, X_train, X_test = train_xgb_regression(client, X_train, y_train, X_test, config)
_, _, train_mse, train_rmse, train_norm_rmse, train_r2, train_mae, train_mape = evaluate_model_regression(dask_model, config,
X_train, y_train)
scoringCreteria, score, mse, rmse, norm_rmse, r2, mae, mape = evaluate_model_regression(dask_model, config,
X_test, y_test)
outputJson = {'status':'SUCCESS','data':{'ModelType':oProblemType,\\
'deployLocation':deployLocation,'BestModel':modelName,'BestScore':score,'ScoreType':scoringCreteria,\\
'matrix':{'MAE':mae,'R2Score':r2,'MSE':mse,'MAPE':mape,'RMSE':rmse,'Normalised RMSE(%)':norm_rmse}, \\
'trainmatrix':{'MAE':train_mae,'R2Score':train_r2,'MSE':train_mse,'MAPE':train_mape,'RMSE':train_rmse,'Normalised RMSE(%)':train_norm_rmse}, \\
'featuresused':trainingFeatures,'targetFeature':target_feature,'EvaluatedModels':[{'Model':modelName,'Score':score}],
'LogFile':logFileName}}
if is_lgbm_regress == "True":
modelName = 'Distributed Light Gradient Boosting (LightGBM)'
dask_model, X_train, X_test = train_lgbm_regression(client, X_train, y_train, X_test, config)
_, _, train_mse, train_rmse, train_norm_rmse, train_r2, train_mae, train_mape = evaluate_model_regression(dask_model, config,
X_train, y_train)
scoringCreteria, score, mse, rmse, norm_rmse, r2, mae, mape = evaluate_model_regression(dask_model, config,
X_test, y_test)
outputJson = {'status':'SUCCESS','data':{'ModelType':oProblemType,\\
'deployLocation':deployLocation,'BestModel':modelName,'BestScore':score,'ScoreType':scoringCreteria,\\
'matrix':{'MAE':mae,'R2Score':r2,'MSE':mse,'MAPE':mape,'RMSE':rmse,'Normalised RMSE(%)':norm_rmse}, \\
'trainmatrix':{'MAE':train_mae,'R2Score':train_r2,'MSE':train_mse,'MAPE':train_mape,'RMSE':train_rmse,'Normalised RMSE(%)':train_norm_rmse}, \\
'featuresused':trainingFeatures,'targetFeature':target_feature,'EvaluatedModels':[{'Model':modelName,'Score':score}],
'LogFile':logFileName}}
src = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','utilities','dl_aion_predict.py')
shutil.copy2(src,deployLocation)
os.rename(os.path.join(deployLocation,'dl_aion_predict.py'),os.path.join(deployLocation,'aion_predict.py'))
except Exception as e:
outputJson = {"status":"FAIL","message":str(e)}
print(e)
client.close()
cluster.close()
log.info('Status:-|... Distributed Learning Completed')
with open(outputjsonFile, 'w') as f:
json.dump(outputJson, f)
f.close()
output_json = json.dumps(outputJson)
log.info('aion_learner_status:'+str(output_json))
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
hdlr.close()
log.removeHandler(hdlr)
print("\\n")
print("aion_learner_status:",output_json)
print("\\n")
end_time = time.time()
print("--- %s processing time (sec) ---" % (end_time - start_time)) <s><s> import autograd
import autograd.numpy as np
import scipy.optimize
from autograd import grad
from autograd.scipy.special import logsumexp
from sklearn.cluster import KMeans
class HMM:
"""
A Hidden Markov Model with Gaussian observations with
unknown means and known precisions.
"""
def __init__(self, X, config_dict=None):
self.N, self.T, self.D = X.shape
self.K = config_dict['K'] # number of HMM states
self.I = np.eye(self.K)
self.Precision = np.zeros([self.D, self.D, self.K])
self.X = X
if config_dict['precision'] is None:
for k in np.arange(self.K):
self.Precision[:, :, k] = np.eye(self.D)
else:
self.Precision = config_dict['precision']
self.dParams_dWeights = None
self.alphaT = None # Store the final beliefs.
self.beta1 = None # store the first timestep beliefs from the beta recursion.
self.forward_trellis = {} # stores \\alpha
self.backward_trellis = {} # stores \\beta
def initialize_params(self, seed=1234):
np.random.seed(seed)
param_dict = {}
A = np.random.randn(self.K, self.K)
# use k-means to initialize the mean parameters
X = self.X.reshape([-1, self.D])
kmeans = KMeans(n_clusters=self.K, random_state=seed,
n_init=15).fit(X)
labels = kmeans.labels_
_, counts = np.unique(labels, return_counts=True)
pi = counts
phi = kmeans.cluster_centers_
param_dict['A'] = np.exp(A)
param_dict['pi0'] = pi
param_dict['phi'] = phi
return self.pack_params(param_dict)
def unpack_params(self, params):
param_dict = dict()
K = self.K
# For unpacking simplex parameters: have packed them as
# log(pi[:-1]) - log(pi[-1]).
unnorm_A = np.exp(np.append(params[:K**2-K].reshape(K, K-1),
np.zeros((K, 1)),
axis=1)
)
Z = np.sum(unnorm_A[:, :-1], axis=1)
unnorm_A /= Z[:, np.newaxis]
norm_A = unnorm_A / unnorm_A.sum(axis=1, keepdims=True)
param_dict['A'] = norm_A
unnorm_pi = np.exp(np.append(params[K**2-K:K**2-1], 0.0))
Z = np.sum(unnorm_pi[:-1])
unnorm_pi /= Z
param_dict['pi0'] = unnorm_pi / unnorm_pi.sum()
param_dict['phi'] = params[K**2-K+K-1:].reshape(self.D, K)
return param_dict
def weighted_alpha_recursion(self, xseq, pi, phi, Sigma, A, wseq, store_belief=False):
"""
Computes the weighted marginal probability of the sequence xseq given parameters;
weights wseq turn on or off the emissions p(x_t | z_t) (weighting scheme B)
:param xseq: T * D
:param pi: K * 1
:param phi: D * K
:param wseq: T * 1
:param A:
:return:
"""
ll = self.log_obs_lik(xseq[:, :, np.newaxis], phi[np.newaxis, :, :], Sigma)
alpha = np.log(pi.ravel()) + wseq[0] * ll[0]
if wseq[0] == 0:
self.forward_trellis[0] = alpha[:, np.newaxis]
for t in np.arange(1, self.T):
alpha = logsumexp(alpha[:, np.newaxis] + np.log(A), axis=0) + wseq[t] * ll[t]
if wseq[t] == 0:
# store the trellis, would be used to compute the posterior z_t | x_1...x_t-1, x_t+1, ...x_T
self.forward_trellis[t] = alpha[:, np.newaxis]
if store_belief:
# store the final belief
self.alphaT = alpha
return logsumexp(alpha)
def weighted_beta_recursion(self, xseq, pi, phi, Sigma, A, wseq, store_belief=False):
"""
Runs beta recursion;
weights wseq turn on or off the emissions p(x_t | z_t) (weighting scheme B)
:param xseq: T * D
:param pi: K * 1
:param phi: D * K
:param wseq: T * 1
:param A:
:return:
"""
ll = self.log_obs_lik(xseq[:, :, np.newaxis], phi[np.newaxis, :, :], Sigma)
beta = np.zeros_like(pi.ravel()) # log(\\beta) of all ones.
max_t = ll.shape[0]
if wseq[max_t - 1] == 0:
# store the trellis, would be used to compute the posterior z_t | x_1...x_t-1, x_t+1, ...x_T
self.backward_trellis[max_t - 1] = beta[:, np.newaxis]
for i in np.arange(1, max_t):
t = max_t - i - 1
beta = logsumexp((beta + wseq[t + 1] * ll[t + 1])[np.newaxis, :] + np.log(A), axis=1)
if wseq[t] == 0:
# store the trellis, would be used to compute the posterior z_t | x_1...x_t-1, x_t+1, ...x_T
self.backward_trellis[t] = beta[:, np.newaxis]
# account for the init prob
beta = (beta + wseq[0] * ll[0]) + np.log(pi.ravel())
if store_belief:
# store the final belief
self.beta1 = beta
return logsumexp(beta)
def weighted_loss(self, params, weights):
"""
For LOOCV / IF computation within a single sequence. Uses weighted alpha recursion
:param params:
:param weights:
:return:
"""
param_dict = self.unpack_params(params)
logp = self.get_prior_contrib(param_dict)
logp = logp + self.weighted_alpha_recursion(self.X[0], param_dict['pi0'],
param_dict['phi'],
self.Precision,
param_dict['A'],
weights)
return -logp
def loss_at_missing_timesteps(self, weights, params):
"""
:param weights: zeroed out weights indicate missing values
:param params: packed parameters
:return:
"""
# empty forward and backward trellis
self.clear_trellis()
param_dict = self.unpack_params(params)
# populate forward and backward trellis
lpx = self.weighted_alpha_recursion(self.X[0], param_dict['pi0'],
param_dict['phi'],
self.Precision,
param_dict['A'],
weights,
store_belief=True )
lpx_alt = self.weighted_beta_recursion(self.X[0], param_dict['pi0'],
param_dict['phi'],
self.Precision,
param_dict['A'],
weights,
store_belief=True)
assert np.allclose(lpx, lpx_alt) # sanity check
test_ll = []
# compute loo likelihood
ll = self.log_obs_lik(self.X[0][:, :, np.newaxis], param_dict['phi'], self.Precision)
# compute posterior p(z_t | x_1,...t-1, t+1,...T) \\forall missing t
tsteps = []
for t in self.forward_trellis.keys():
lpz_given_x = self.forward_trellis[t] + self.backward_trellis[t] - lpx
test_ll.append(logsumexp(ll[t] + lpz_given_x.ravel()))
tsteps.append(t)
# empty forward and backward trellis
self.clear_trellis()
return -np.array(test_ll)
def fit(self, weights, init_params=None, num_random_restarts=1, verbose=False, maxiter=None):
if maxiter:
options_dict = {'disp': verbose, 'gtol': 1e-10, 'maxiter': maxiter}
else:
options_dict = {'disp': verbose, 'gtol': 1e-10}
# Define a function that returns gradients of training loss using Autograd.
training_loss_fun = lambda params: self.weighted_loss(params, weights)
training_gradient_fun = grad(training_loss_fun, 0)
if init_params is None:
init_params = self.initialize_params()
if verbose:
print("Initial loss: ", training_loss_fun(init_params))
res = scipy.optimize.minimize(fun=training_loss_fun,
jac=training_gradient_fun,
x0=init_params,
tol=1e-10,
options=options_dict)
if verbose:
print('grad norm =', np.linalg.norm(res.jac))
return res.x
def clear_trellis(self):
self.forward_trellis = {}
self.backward_trellis = {}
#### Required for IJ computation ###
def compute_hessian(self, params_one, weights_one):
return autograd.hessian(self.weighted_loss, argnum=0)(params_one, weights_one)
def compute_jacobian(self, params_one, weights_one):
return autograd.jacobian(autograd.jacobian(self.weighted_loss, argnum=0), argnum=1)\\
(params_one, weights_one).squeeze()
###################################################
@staticmethod
def log_obs_lik(x, phi, Sigma):
"""
:param x: T*D*1
:param phi: 1*D*K
:param Sigma: D*D*K --- precision matrices per state
:return: ll
"""
centered_x = x - phi
ll = -0.5 * np.einsum('tdk, tdk, ddk -> tk', centered_x, centered_x, Sigma )
return ll
@staticmethod
def pack_params(params_dict):
param_list = [(np.log(params_dict['A'][:, :-1]) -
np.log(params_dict['A'][:, -1])[:, np.newaxis]).ravel(),
np.log(params_dict['pi0'][:-1]) - np.log(params_dict['pi0'][-1]),
params_dict['phi'].ravel()]
return np.concatenate(param_list)
@staticmethod
def get_prior_contrib(param_dict):
logp = 0.0
# Prior
logp += -0.5 * (np.linalg.norm(param_dict['phi'], axis=0) ** 2).sum()
logp += (1.1 - 1) * np.log(param_dict['A']).sum()
logp += (1.1 - 1) * np.log(param_dict['pi0']).sum()
return logp
@staticmethod
def get_indices_in_held_out_fold(T, pct_to_drop, contiguous=False):
"""
:param T: length of the sequence
:param pct_to_drop: % of T in the held out fold
:param contiguous: if True generate a block of indices to drop else generate indices by iid sampling
:return: o (the set of indices in the fold)
"""
if contiguous:
l = np.floor(pct_to_drop / 100. * T)
anchor = np.random.choice(np.arange(l + 1, T))
o = np.arange(anchor - l, anchor).astype(int)
else:
# i.i.d LWCV
o = np.random.choice(T - 2, size=int(pct_to_drop / 100. * T), replace=False) + 1
return o
@staticmethod
def synthetic_hmm_data(K, T, D, sigma0=None, seed=1234, varainces_of_mean=1.0,
diagonal_upweight=False):
"""
:param K: Number of HMM states
:param T: length of the sequence
"""
N = 1 # For structured IJ we will remove data / time steps from a single sequence
np.random.seed(seed)
if sigma0 is None:
sigma0 = np.eye(D)
A = np.random.dirichlet(alpha=np.ones(K), size=K)
if diagonal_upweight:
A = A + 3 * np.eye(K) # add 3 to the diagonal and renormalize to encourage self transitions
A = A / A.sum(axis=1, keepdims=True)  # renormalize each row
pi0 = np.random.dirichlet(alpha=np.ones(K))
mus = np.random.normal(size=(K, D), scale=np.sqrt(varainces_of_mean))
zs = np.empty((N, T), dtype=int)
X = np.empty((N, T, D))
for n in range(N):
zs[n, 0] = int(np.random.choice(np.arange(K), p=pi0))
X[n, 0] = np.random.multivariate_normal(mean=mus[zs[n, 0]], cov=sigma0)
for t in range(1, T):
zs[n, t] = int(np.random.choice(np.arange(K), p=A[zs[n, t - 1], :]))
X[n, t] = np.random.multivariate_normal(mean=mus[zs[n, t]], cov=sigma0)
return {'X': X, 'state_assignments': zs, 'A': A, 'initial_state_assignment': pi0, 'means': mus}
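if __name__ == '__main__':
    # Minimal smoke test -- an illustrative sketch, not part of the original module.
    # The K/T/D values, seed and iteration cap below are arbitrary choices.
    data = HMM.synthetic_hmm_data(K=2, T=30, D=2, seed=0)
    model = HMM(data['X'], config_dict={'K': 2, 'precision': None})
    weights = np.ones(model.T)  # all emission terms switched on
    fitted_params = model.fit(weights, verbose=True, maxiter=25)
    print('packed parameter vector length:', len(fitted_params))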
<s><s> from builtins import range
import autograd.numpy as np
def adam(grad, x, callback=None, num_iters=100, step_size=0.001, b1=0.9, b2=0.999, eps=10**-8, polyak=False):
"""Adapted from autograd.misc.optimizers"""
m = np.zeros(len(x))
v = np.zeros(len(x))
for i in range(num_iters):
g = grad(x, i)
if callback: callback(x, i, g, polyak)
m = (1 - b1) * g + b1 * m # First moment estimate.
v = (1 - b2) * (g**2) + b2 * v # Second moment estimate.
mhat = m / (1 - b1**(i + 1)) # Bias correction.
vhat = v / (1 - b2**(i + 1))
x = x - step_size*mhat/(np.sqrt(vhat) + eps)
return x<s> import matplotlib.pyplot as plt
import numpy as np
import numpy.random as npr
import torch as torch
def make_data_gap(seed, data_count=100):
import GPy
npr.seed(0)
x = np.hstack([np.linspace(-5, -2, int(data_count/2)), np.linspace(2, 5, int(data_count/2))])
x = x[:, np.newaxis]
k = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.)
K = k.K(x)
L = np.linalg.cholesky(K + 1e-5 * np.eye(data_count))
# draw a noise free random function from a GP
eps = np.random.randn(data_count)
f = L @ eps
# use a homoskedastic Gaussian noise model N(f(x)_i, \\sigma^2). \\sigma^2 = 0.1
eps_noise = np.sqrt(0.1) * np.random.randn(data_count)
y = f + eps_noise
y = y[:, np.newaxis]
plt.plot(x, f, 'ko', ms=2)
plt.plot(x, y, 'ro')
plt.title("GP generated Data")
plt.pause(1)
return torch.FloatTensor(x), torch.FloatTensor(y), torch.FloatTensor(x), torch.FloatTensor(y)
def make_data_sine(seed, data_count=450):
# fix the random seed
np.random.seed(seed)
noise_var = 0.1
X = np.linspace(-4, 4, data_count)
y = 1*np.sin(X) + np.sqrt(noise_var)*npr.randn(data_count)
train_count = int (0.2 * data_count)
idx = npr.permutation(range(data_count))
X_train = X[idx[:train_count], np.newaxis ]
X_test = X[ idx[train_count:], np.newaxis ]
y_train = y[ idx[:train_count] ]
y_test = y[ idx[train_count:] ]
mu = np.mean(X_train, 0)
std = np.std(X_train, 0)
X_train = (X_train - mu) / std
X_test = (X_test - mu) / std
mu = np.mean(y_train, 0)
std = np.std(y_train, 0)
# mu = 0
# std = 1
y_train = (y_train - mu) / std
y_test = (y_test -mu) / std
train_stats = dict()
train_stats['mu'] = torch.FloatTensor([mu])
train_stats['sigma'] = torch.FloatTensor([std])
return torch.FloatTensor(X_train), torch.FloatTensor(y_train), torch.FloatTensor(X_test), torch.FloatTensor(y_test),\\
train_stats<s> import autograd
import autograd.numpy as np
import numpy.random as npr
import scipy.optimize
sigmoid = lambda x: 0.5 * (np.tanh(x / 2.) + 1)
get_num_train = lambda inputs: inputs.shape[0]
logistic_predictions = lambda params, inputs: sigmoid(np.dot(inputs, params))
class LogisticRegression:
def __init__(self):
self.params = None
def set_parameters(self, params):
self.params = params
def predict(self, X):
if self.params is not None:
# Outputs probability of a label being true according to logistic model
return np.atleast_2d(sigmoid(np.dot(X, self.params))).T
else:
raise RuntimeError("Params need to be fit before predictions can be made.")
def loss(self, params, weights, inputs, targets):
# Training loss is the negative log-likelihood of the training labels.
preds = logistic_predictions(params, inputs)
label_probabilities = preds * targets + (1 - preds) * (1 - targets)
return -np.sum(weights * np.log(label_probabilities + 1e-16))
def fit(self, weights, init_params, inputs, targets, verbose=True):
training_loss_fun = lambda params: self.loss(params, weights, inputs, targets)
# Define a function that returns gradients of training loss using Autograd.
training_gradient_fun = autograd.grad(training_loss_fun, 0)
# optimize params
if verbose:
print("Initial loss:", self.loss(init_params, weights, inputs, targets))
# opt_params = sgd(training_gradient_fun, params, hyper=1, num_iters=5000, step_size=0.1)
res = scipy.optimize.minimize(fun=training_loss_fun,
jac=training_gradient_fun,
x0=init_params,
tol=1e-10,
options={'disp': verbose})
opt_params = res.x
if verbose:
print("Trained loss:", self.loss(opt_params, weights, inputs, targets))
self.params = opt_params
return opt_params
def get_test_acc(self, params, test_targets, test_inputs):
preds = np.round(self.predict(test_inputs).T).astype(int)
err = np.abs(test_targets - preds).sum()
return 1 - err/ test_targets.shape[1]
#### Required for IJ computation ###
def compute_hessian(self, params_one, weights_one, inputs, targets):
return autograd.hessian(self.loss, argnum=0)(params_one, weights_one, inputs, targets)
def compute_jacobian(self, params_one, weights_one, inputs, targets):
return autograd.jacobian(autograd.jacobian(self.loss, argnum=0), argnum=1)\\
(params_one, weights_one, inputs, targets).squeeze()
###################################################
@staticmethod
def synthetic_lr_data(N=10000, D=10):
x = 1. * npr.randn(N, D)
x_test = 1. * npr.randn(int(0.3 * N), D)
w = npr.randn(D, 1)
y = sigmoid((x @ w)).ravel()
y = npr.binomial(n=1, p=y) # corrupt labels
y_test = sigmoid(x_test @ w).ravel()
# y_test = np.round(y_test)
y_test = npr.binomial(n=1, p=y_test)
return x, np.atleast_2d(y), x_test, np.atleast_2d(y_test)
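if __name__ == '__main__':
    # Minimal smoke test -- an illustrative sketch, not part of the original module.
    # Data sizes and the zero initialisation below are arbitrary choices.
    x, y, x_test, y_test = LogisticRegression.synthetic_lr_data(N=2000, D=5)
    model = LogisticRegression()
    init_params = np.zeros(x.shape[1])
    model.fit(np.ones(x.shape[0]), init_params, x, y.ravel(), verbose=False)
    print('test accuracy:', model.get_test_acc(model.params, y_test, x_test))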
<s> import abc
import sys
# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta(str('ABC'), (), {})
from copy import deepcopy
import numpy as np
import numpy.random as npr
def make_batches(n_data, batch_size):
return [slice(i, min(i+batch_size, n_data)) for i in range(0, n_data, batch_size)]
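# Worked example: make_batches(10, 4) returns [slice(0, 4), slice(4, 8), slice(8, 10)],
# i.e. the last batch is simply shorter when batch_size does not divide n_data.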
def generate_regression_data(seed, data_count=500):
"""
Generate data from a noisy sine wave.
:param seed: random number seed
:param data_count: number of data points.
:return:
"""
np.random.seed(seed)
noise_var = 0.1
x = np.linspace(-4, 4, data_count)
y = 1*np.sin(x) + np.sqrt(noise_var)*npr.randn(data_count)
train_count = int (0.2 * data_count)
idx = npr.permutation(range(data_count))
x_train = x[idx[:train_count], np.newaxis ]
x_test = x[ idx[train_count:], np.newaxis ]
y_train = y[ idx[:train_count] ]
y_test = y[ idx[train_count:] ]
mu = np.mean(x_train, 0)
std = np.std(x_train, 0)
x_train = (x_train - mu) / std
x_test = (x_test - mu) / std
mu = np.mean(y_train, 0)
std = np.std(y_train, 0)
y_train = (y_train - mu) / std
train_stats = dict()
train_stats['mu'] = mu
train_stats['sigma'] = std
return x_train, y_train, x_test, y_test, train_stats
def form_D_for_auucc(yhat, zhatl, zhatu):
# a handy routine to format data as needed by the UCC fit() method
D = np.zeros([yhat.shape[0], 3])
D[:, 0] = yhat.squeeze()
D[:, 1] = zhatl.squeeze()
D[:, 2] = zhatu.squeeze()
return D
def fitted_ucc_w_nullref(y_true, y_pred_mean, y_pred_lower, y_pred_upper):
"""
Instantiates an UCC object for the target predictor plus a 'null' (constant band) reference
:param y_pred_lower:
:param y_pred_mean:
:param y_pred_upper:
:param y_true:
:return: ucc object fitted for two systems: target + null reference
"""
# form matrix for ucc:
X_for_ucc = form_D_for_auucc(y_pred_mean.squeeze(),
y_pred_mean.squeeze() - y_pred_lower.squeeze(),
y_pred_upper.squeeze() - y_pred_mean.squeeze())
# form matrix for a 'null' system (constant band)
X_null = deepcopy(X_for_ucc)
X_null[:,1:] = np.std(y_pred_mean) # can be set to any other constant (no effect on AUUCC)
# create an instance of ucc and fit data
from uq360.metrics.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve as ucc
u = ucc()
u.fit([X_for_ucc, X_null], y_true.squeeze())
return u
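# Illustrative usage (a sketch; the synthetic targets and the constant interval half-width of 1.0
# below are arbitrary):
#   y_true = np.random.randn(100)
#   y_mean = y_true + 0.1 * np.random.randn(100)
#   u = fitted_ucc_w_nullref(y_true, y_mean, y_mean - 1.0, y_mean + 1.0)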
def make_sklearn_compatible_scorer(task_type, metric, greater_is_better=True, **kwargs):
"""
Args:
task_type: (str) regression or classification.
metric: (str): choice of metric can be one of these - [aurrrc, ece, auroc, nll, brier, accuracy] for
classification and ["rmse", "nll", "auucc_gain", "picp", "mpiw", "r2"] for regression.
greater_is_better: if False, the scores are negated before being returned.
**kwargs: additional arguments specific to some metrics.
Returns:
sklearn compatible scorer function.
"""
from uq360.metrics.classification_metrics import compute_classification_metrics
from uq360.metrics.regression_metrics import compute_regression_metrics
def sklearn_compatible_score(model, X, y_true):
"""
Args:
model: The model being scored. Currently uq360 and sklearn models are supported.
X: Input features.
y_true: ground truth values for the target.
Returns:
Computed score of the model.
"""
from uq360.algorithms.builtinuq import BuiltinUQ
from uq360.algorithms.posthocuq import PostHocUQ
if isinstance(model, BuiltinUQ) or isinstance(model, PostHocUQ):
# uq360 models
if task_type == "classification":
score = compute_classification_metrics(
y_true=y_true,
y_prob=model.predict(X).y_prob,
option=metric,
**kwargs
)[metric]
elif task_type == "regression":
y_mean, y_lower, y_upper = model.predict(X)
score = compute_regression_metrics(
y_true=y_true,
y_mean=y_mean,
y_lower=y_lower,
y_upper=y_upper,
option=metric,
**kwargs
)[metric]
else:
raise NotImplementedError
else:
# sklearn models
if task_type == "classification":
score = compute_classification_metrics(
y_true=y_true,
y_prob=model.predict_proba(X),
option=metric,
**kwargs
)[metric]
else:
if metric in ["rmse", "r2"]:
score = compute_regression_metrics(
y_true=y_true,
y_mean=model.predict(X),
y_lower=None,
y_upper=None,
option=metric,
**kwargs
)[metric]
else:
raise NotImplementedError("{} is not supported for sklearn regression models".format(metric))
if not greater_is_better:
score = -score
return score
return sklearn_compatible_score
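# Illustrative usage (a sketch): plugging the returned callable into scikit-learn model selection.
# The estimator and the X/y arrays below are placeholders; any sklearn regressor exercises the
# non-uq360 branch above.
#   from sklearn.linear_model import LinearRegression
#   from sklearn.model_selection import cross_val_score
#   rmse_scorer = make_sklearn_compatible_scorer("regression", "rmse", greater_is_better=False)
#   scores = cross_val_score(LinearRegression(), X, y, scoring=rmse_scorer)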
class DummySklearnEstimator(ABC):
def __init__(self, num_classes, base_model_prediction_fn):
self.base_model_prediction_fn = base_model_prediction_fn
self.classes_ = [i for i in range(num_classes)]
def fit(self):
pass
def predict_proba(self, X):
return self.base_model_prediction_fn(X)
<s> # Adapted from https://github.com/Trusted-AI/AIX360/blob/master/aix360/datasets/meps_dataset.py
# Utilization target is kept as a continuous target.
import os
import pandas as pd
def default_preprocessing(df):
"""
1.Create a new column, RACE that is 'White' if RACEV2X = 1 and HISPANX = 2 i.e. non Hispanic White
and 'non-White' otherwise
2. Restrict to Panel 19
3. RENAME all columns that are PANEL/ROUND SPECIFIC
4. Drop rows based on certain values of individual features that correspond to missing/unknown - generally < -1
5. Compute UTILIZATION.
"""
def race(row):
if ((row['HISPANX'] == 2) and (row['RACEV2X'] == 1)): #non-Hispanic Whites are marked as WHITE; all others as NON-WHITE
return 'White'
return 'Non-White'
df['RACEV2X'] = df.apply(lambda row: race(row), axis=1)
df = df.rename(columns = {'RACEV2X' : 'RACE'})
df = df[df['PANEL'] == 19]
# RENAME COLUMNS
df = df.rename(columns = {'FTSTU53X' : 'FTSTU', 'ACTDTY53' : 'ACTDTY', 'HONRDC53' : 'HONRDC', 'RTHLTH53' : 'RTHLTH',
'MNHLTH53' : 'MNHLTH', 'CHBRON53' : 'CHBRON', 'JTPAIN53' : 'JTPAIN', 'PREGNT53' : 'PREGNT',
'WLKLIM53' : 'WLKLIM', 'ACTLIM53' : 'ACTLIM', 'SOCLIM53' : 'SOCLIM', 'COGLIM53' : 'COGLIM',
'EMPST53' : 'EMPST', 'REGION53' : 'REGION', 'MARRY53X' : 'MARRY', 'AGE53X' : 'AGE',
'POVCAT15' : 'POVCAT', 'INSCOV15' : 'INSCOV'})
df = df[df['REGION'] >= 0] # remove values -1
df = df[df['AGE'] >= 0] # remove values -1
df = df[df['MARRY'] >= 0] # remove values -1, -7, -8, -9
df = df[df['ASTHDX'] >= 0] # remove values -1, -7, -8, -9
df = df[(df[['FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX','EDUCYR','HIDEG',
'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX',
'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM',
'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42',
'PHQ242','EMPST','POVCAT','INSCOV']] >= -1).all(1)] #for all other categorical features, remove values < -1
def utilization(row):
return row['OBTOTV15'] + row['OPTOTV15'] + row['ERTOT15'] + row['IPNGTD15'] + row['HHTOTD15']
df['TOTEXP15'] = df.apply(lambda row: utilization(row), axis=1)
df = df.rename(columns = {'TOTEXP15' : 'UTILIZATION'})
df = df[['REGION','AGE','SEX','RACE','MARRY',
'FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX',
'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX',
'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM',
'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42','PCS42',
'MCS42','K6SUM42','PHQ242','EMPST','POVCAT','INSCOV','UTILIZATION','PERWT15F']]
return df
class MEPSDataset():
"""
The Medical Expenditure Panel Survey (MEPS) [#]_ data consists of large scale surveys of families and individuals,
medical providers, and employers, and collects data on health services used, costs & frequency of services,
demographics, health status and conditions, etc., of the respondents.
This specific dataset contains MEPS survey data for calendar year 2015 obtained in rounds 3, 4, and 5 of Panel 19,
and rounds 1, 2, and 3 of Panel 20.
See :file:`uq360/datasets/data/meps_data/README.md` for more details on the dataset and instructions on downloading/processing the data.
References:
.. [#] `Medical Expenditure Panel Survey data <https://meps.ahrq.gov/mepsweb/>`_
"""
def __init__(self, custom_preprocessing=default_preprocessing, dirpath=None):
self._dirpath = dirpath
if not self._dirpath:
self._dirpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'meps_data')
self._filepath = os.path.join(self._dirpath, 'h181.csv')
try:
df = pd.read_csv(self._filepath, sep=',', na_values=[])
except IOError as err:
print("IOError: {}".format(err))
print("To use this class, please place the heloc_dataset.csv:")
print("file, as-is, in the folder:")
print("\\n\\t{}\\n".format(os.path.abspath(os.path.join(
os.path.abspath(__file__), 'data', 'meps_data'))))
import sys
sys.exit(1)
if custom_preprocessing:
self._data = custom_preprocessing(df)
def data(self):
return self._data<s> from .meps_dataset import MEPSDataset
<s><s> import abc
import sys
# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta(str('ABC'), (), {})
class BuiltinUQ(ABC):
""" BuiltinUQ is the base class for any algorithm that has UQ built into it.
"""
def __init__(self, *argv, **kwargs):
""" Initialize a BuiltinUQ object.
"""
@abc.abstractmethod
def fit(self, *argv, **kwargs):
""" Learn the UQ related parameters..
"""
raise NotImplementedError
@abc.abstractmethod
def predict(self, *argv, **kwargs):
""" Method to obtain the predicitve uncertainty, this can return the total, epistemic and/or aleatoric
uncertainty in the predictions.
"""
raise NotImplementedError
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
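# Minimal subclass sketch (illustrative only; not shipped with the module). A concrete algorithm
# implements fit() and predict(); set_params() is inherited from the base class.
#   class MyUQModel(BuiltinUQ):
#       def fit(self, X, y):
#           ...  # estimate model and uncertainty parameters
#           return self
#       def predict(self, X):
#           ...  # return e.g. (y_mean, y_lower, y_upper)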
<s><s> import abc
import sys
# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta(str('ABC'), (), {})
class PostHocUQ(ABC):
""" PostHocUQ is the base class for any algorithm that quantifies uncertainty of a pre-trained model.
"""
def __init__(self, *argv, **kwargs):
""" Initialize a BuiltinUQ object.
"""
@abc.abstractmethod
def _process_pretrained_model(self, *argv, **kwargs):
""" Method to process the pretrained model that requires UQ.
"""
raise NotImplementedError
@abc.abstractmethod
def predict(self, *argv, **kwargs):
""" Method to obtain the predicitve uncertainty, this can return the total, epistemic and/or aleatoric
uncertainty in the predictions.
"""
raise NotImplementedError
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
def get_params(self):
"""
This method should not take any arguments and returns a dict of the __init__ parameters.
"""
raise NotImplementedError
<s> from collections import namedtuple
import numpy as np
import torch
from scipy.stats import norm
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from uq360.algorithms.builtinuq import BuiltinUQ
from uq360.models.heteroscedastic_mlp import GaussianNoiseMLPNet as _MLPNet
np.random.seed(42)
torch.manual_seed(42)
class HeteroscedasticReg |
ression(BuiltinUQ):
""" Wrapper for heteroscedastic regression. We learn to predict targets given features,
assuming that the targets are noisy and that the amount of noise varies between data points.
https://en.wikipedia.org/wiki/Heteroscedasticity
"""
def __init__(self, model_type=None, model=None, config=None, device=None, verbose=True):
"""
Args:
model_type: The base model architecture. Currently supported values are [mlp].
mlp modeltype learns a multi-layer perceptron with a heteroscedastic Gaussian likelihood. Both the
mean and variance of the Gaussian are functions of the data point: N(y_n | mlp_mu(x_n), mlp_var(x_n))
model: (optional) The prediction model. Currently support pytorch models that returns mean and log variance.
config: dictionary containing the config parameters for the model.
device: device used for pytorch models ignored otherwise.
verbose: if True, print statements with the progress are enabled.
"""
super(HeteroscedasticRegression, self).__init__()
self.config = config
self.device = device
self.verbose = verbose
if model_type == "mlp":
self.model_type = model_type
self.model = _MLPNet(
num_features=self.config["num_features"],
num_outputs=self.config["num_outputs"],
num_hidden=self.config["num_hidden"],
)
elif model_type == "custom":
self.model_type = model_type
self.model = model
else:
raise NotImplementedError
def get_params(self, deep=True):
return {"model_type": self.model_type, "config": self.config, "model": self.model,
"device": self.device, "verbose": self.verbose}
def _loss(self, y_true, y_pred_mu, y_pred_log_var):
return torch.mean(0.5 * torch.exp(-y_pred_log_var) * torch.abs(y_true - y_pred_mu) ** 2 +
0.5 * y_pred_log_var)
def fit(self, X, y):
""" Fit the Heteroscedastic Regression model.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
X = torch.from_numpy(X).float().to(self.device)
y = torch.from_numpy(y).float().to(self.device)
dataset_loader = DataLoader(
TensorDataset(X,y),
batch_size=self.config["batch_size"]
)
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.config["lr"])
for epoch in range(self.config["num_epochs"]):
avg_loss = 0.0
for batch_x, batch_y in dataset_loader:
self.model.train()
batch_y_pred_mu, batch_y_pred_log_var = self.model(batch_x)
loss = self.model.loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var)
optimizer.zero_grad()
loss.backward()
optimizer.step()
avg_loss += loss.item()/len(dataset_loader)
if self.verbose:
print("Epoch: {}, loss = {}".format(epoch, avg_loss))
return self
def predict(self, X, return_dists=False):
"""
Obtain predictions for the test points.
        In addition to the mean and lower/upper bounds, the full predictive distribution is also
        returned when `return_dists` is True.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.
Returns:
            namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
dists: list of predictive distribution as `scipy.stats` objects with length n_samples.
Only returned when `return_dists` is True.
"""
self.model.eval()
X = torch.from_numpy(X).float().to(self.device)
dataset_loader = DataLoader(
X,
batch_size=self.config["batch_size"]
)
y_mean_list = []
y_log_var_list = []
for batch_x in dataset_loader:
batch_y_pred_mu, batch_y_pred_log_var = self.model(batch_x)
y_mean_list.append(batch_y_pred_mu.data.cpu().numpy())
y_log_var_list.append(batch_y_pred_log_var.data.cpu().numpy())
y_mean = np.concatenate(y_mean_list)
y_log_var = np.concatenate(y_log_var_list)
y_std = np.sqrt(np.exp(y_log_var))
y_lower = y_mean - 2.0*y_std
y_upper = y_mean + 2.0*y_std
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
if return_dists:
dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_dists',))
res = Result(*res, y_dists=dists)
return res
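# Illustrative usage sketch (not part of the original module): fits HeteroscedasticRegression on a
# small synthetic dataset with input-dependent noise. The config keys below are the ones read by
# __init__, fit and predict above; the synthetic data and hyperparameter values are assumptions
# chosen only for demonstration.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_train = rng.uniform(-3, 3, size=(256, 1)).astype(np.float32)
    noise = (0.1 + 0.4 * np.abs(X_train)) * rng.randn(256, 1).astype(np.float32)
    y_train = np.sin(X_train) + noise
    config = {"num_features": 1, "num_outputs": 1, "num_hidden": 32,
              "batch_size": 32, "lr": 1e-2, "num_epochs": 20}  # assumed demo values
    uq_model = HeteroscedasticRegression(model_type="mlp", config=config, device="cpu", verbose=False)
    uq_model.fit(X_train, y_train)
    res = uq_model.predict(X_train[:5])
    print(res.y_mean.squeeze(), res.y_lower.squeeze(), res.y_upper.squeeze())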
<s> from .heteroscedastic_regression import HeteroscedasticRegression
<s> from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from scipy.stats import norm
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from uq360.algorithms.builtinuq import BuiltinUQ
np.random.seed(42)
torch.manual_seed(42)
class _MLPNet_Main(torch.nn.Module):
def __init__(self, num_features, num_outputs, num_hidden):
super(_MLPNet_Main, self).__init__()
self.fc = torch.nn.Linear(num_features, num_hidden)
self.fc_mu = torch.nn.Linear(num_hidden, num_outputs)
self.fc_log_var = torch.nn.Linear(num_hidden, num_outputs)
def forward(self, x):
x = F.relu(self.fc(x))
mu = self.fc_mu(x)
log_var = self.fc_log_var(x)
return mu, log_var
class _MLPNet_Aux(torch.nn.Module):
def __init__(self, num_features, num_outputs, num_hidden):
super(_MLPNet_Aux, self).__init__()
self.fc = torch.nn.Linear(num_features, num_hidden)
self.fc_log_var = torch.nn.Linear(num_hidden, num_outputs)
def forward(self, x):
x = F.relu(self.fc(x))
log_var = self.fc_log_var(x)
return log_var
class AuxiliaryIntervalPredictor(BuiltinUQ):
""" Auxiliary Interval Predictor [1]_ uses an auxiliary model to encourage calibration of the main model.
References:
.. [1] Thiagarajan, J. J., Venkatesh, B., Sattigeri, P., & Bremer, P. T. (2020, April). Building calibrated deep
models via uncertainty matching with auxiliary interval predictors. In Proceedings of the AAAI Conference on
Artificial Intelligence (Vol. 34, No. 04, pp. 6005-6012). https://arxiv.org/abs/1909.04079
"""
def __init__(self, model_type=None, main_model=None, aux_model=None, config=None, device=None, verbose=True):
"""
Args:
model_type: The model type used to build the main model and the auxiliary model. Currently supported values
                are [mlp, custom]. `mlp` model type learns an MLP neural network using the pytorch framework. For `custom` the user
                provides `main_model` and `aux_model`.
            main_model: (optional) The main prediction model. Currently supports pytorch models that return mean and log variance.
            aux_model: (optional) The auxiliary prediction model. Currently supports pytorch models that return calibrated log variance.
config: dictionary containing the config parameters for the model.
device: device used for pytorch models ignored otherwise.
verbose: if True, print statements with the progress are enabled.
"""
        super(AuxiliaryIntervalPredictor, self).__init__()
self.config = config
self.device = device
self.verbose = verbose
if model_type == "mlp":
self.model_type = model_type
self.main_model = _MLPNet_Main(
num_features=self.config["num_features"],
num_outputs=self.config["num_outputs"],
num_hidden=self.config["num_hidden"],
)
self.aux_model = _MLPNet_Aux(
num_features=self.config["num_features"],
num_outputs=self.config["num_outputs"],
num_hidden=self.config["num_hidden"],
)
elif model_type == "custom":
self.model_type = model_type
self.main_model = main_model
self.aux_model = aux_model
else:
raise NotImplementedError
def get_params(self, deep=True):
return {"model_type": self.model_type, "config": self.config, "main_model": self.main_model,
"aux_model": self.aux_model, "device": self.device, "verbose": self.verbose}
def _main_model_loss(self, y_true, y_pred_mu, y_pred_log_var, y_pred_log_var_aux):
r = torch.abs(y_true - y_pred_mu)
# + 0.5 * y_pred_log_var +
        loss = torch.mean(0.5 * torch.exp(-y_pred_log_var) * r ** 2) + \
            self.config["lambda_match"] * torch.mean(torch.abs(torch.exp(0.5 * y_pred_log_var) - torch.exp(0.5 * y_pred_log_var_aux)))
return loss
def _aux_model_loss(self, y_true, y_pred_mu, y_pred_log_var_aux):
deltal = deltau = 2.0 * torch.exp(0.5 * y_pred_log_var_aux)
upper = y_pred_mu + deltau
lower = y_pred_mu - deltal
width = upper - lower
r = torch.abs(y_true - y_pred_mu)
emce = torch.mean(torch.sigmoid((y_true - lower) * (upper - y_true) * 100000))
loss_emce = torch.abs(self.config["calibration_alpha"]-emce)
loss_noise = torch.mean(torch.abs(0.5 * width - r))
loss_sharpness = torch.mean(torch.abs(upper - y_true)) + torch.mean(torch.abs(lower - y_true))
#print(emce)
return loss_emce + self.config["lambda_noise"] * loss_noise + self.config["lambda_sharpness"] * loss_sharpness
def fit(self, X, y):
""" Fit the Auxiliary Interval Predictor model.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
X = torch.from_numpy(X).float().to(self.device)
y = torch.from_numpy(y).float().to(self.device)
dataset_loader = DataLoader(
TensorDataset(X,y),
batch_size=self.config["batch_size"]
)
optimizer_main_model = torch.optim.Adam(self.main_model.parameters(), lr=self.config["lr"])
optimizer_aux_model = torch.optim.Adam(self.aux_model.parameters(), lr=self.config["lr"])
for it in range(self.config["num_outer_iters"]):
# Train the main model
for epoch in range(self.config["num_main_iters"]):
avg_mean_model_loss = 0.0
for batch_x, batch_y in dataset_loader:
self.main_model.train()
self.aux_model.eval()
batch_y_pred_log_var_aux = self.aux_model(batch_x)
batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x)
main_loss = self._main_model_loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var, batch_y_pred_log_var_aux)
optimizer_main_model.zero_grad()
main_loss.backward()
optimizer_main_model.step()
avg_mean_model_loss += main_loss.item()/len(dataset_loader)
if self.verbose:
print("Iter: {}, Epoch: {}, main_model_loss = {}".format(it, epoch, avg_mean_model_loss))
# Train the auxiliary model
for epoch in range(self.config["num_aux_iters"]):
avg_aux_model_loss = 0.0
for batch_x, batch_y in dataset_loader:
self.aux_model.train()
self.main_model.eval()
batch_y_pred_log_var_aux = self.aux_model(batch_x)
batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x)
aux_loss = self._aux_model_loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var_aux)
optimizer_aux_model.zero_grad()
                    aux_loss.backward()
optimizer_aux_model.step()
avg_aux_model_loss += aux_loss.item() / len(dataset_loader)
if self.verbose:
print("Iter: {}, Epoch: {}, aux_model_loss = {}".format(it, epoch, avg_aux_model_loss))
return self
def predict(self, X, return_dists=False):
"""
Obtain predictions for the test points.
In addition to the mean and lower/upper bounds, also returns full predictive distribution (return_dists=True).
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.
Returns:
            namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
dists: list of predictive distribution as `scipy.stats` objects with length n_samples.
Only returned when `return_dists` is True.
"""
self.main_model.eval()
X = torch.from_numpy(X).float().to(self.device)
dataset_loader = DataLoader(
X,
batch_size=self.config["batch_size"]
)
y_mean_list = []
y_log_var_list = []
for batch_x in dataset_loader:
batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x)
y_mean_list.append(batch_y_pred_mu.data.cpu().numpy())
y_log_var_list.append(batch_y_pred_log_var.data.cpu().numpy())
y_mean = np.concatenate(y_mean_list)
y_log_var = np.concatenate(y_log_var_list)
y_std = np.sqrt(np.exp(y_log_var))
y_lower = y_mean - 2.0*y_std
y_upper = y_mean + 2.0*y_std
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
if return_dists:
dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_dists',))
res = Result(*res, y_dists=dists)
return res
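# Illustrative usage sketch (not part of the original module): trains the AuxiliaryIntervalPredictor
# on a small synthetic regression task. The config values below are assumptions for demonstration;
# together they cover every key read by __init__, the loss helpers and fit above.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_train = rng.uniform(-3, 3, size=(256, 1)).astype(np.float32)
    y_train = (np.sin(X_train) + 0.2 * rng.randn(256, 1)).astype(np.float32)
    config = {"num_features": 1, "num_outputs": 1, "num_hidden": 32,
              "batch_size": 32, "lr": 1e-2,
              "num_outer_iters": 2, "num_main_iters": 5, "num_aux_iters": 5,
              "lambda_match": 1.0, "calibration_alpha": 0.95,
              "lambda_noise": 1.0, "lambda_sharpness": 0.5}  # assumed demo values
    predictor = AuxiliaryIntervalPredictor(model_type="mlp", config=config, device="cpu", verbose=False)
    predictor.fit(X_train, y_train)
    res = predictor.predict(X_train[:5])
    print(res.y_mean.squeeze(), res.y_lower.squeeze(), res.y_upper.squeeze())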
<s> from .auxiliary_interval_predictor import AuxiliaryIntervalPredictor
<s> from .infinitesimal_jackknife import InfinitesimalJackknife
<s> from collections import namedtuple
import numpy as np
from uq360.algorithms.posthocuq import PostHocUQ
class InfinitesimalJackknife(PostHocUQ):
"""
Performs a first order Taylor series expansion around MLE / MAP fit.
Requires the model being probed to be twice differentiable.
"""
def __init__(self, params, gradients, hessian, config):
""" Initialize IJ.
Args:
params: MLE / MAP fit around which uncertainty is sought. d*1
gradients: Per data point gradients, estimated at the MLE / MAP fit. d*n
hessian: Hessian evaluated at the MLE / MAP fit. d*d
"""
        super(InfinitesimalJackknife, self).__init__()
self.params_one = params
self.gradients = gradients
self.hessian = hessian
self.d, self.n = gradients.shape
self.dParams_dWeights = -np.linalg.solve(self.hessian, self.gradients)
self.approx_dParams_dWeights = -np.linalg.solve(np.diag(np.diag(self.hessian)), self.gradients)
self.w_one = np.ones([self.n])
self.config = config
def get_params(self, deep=True):
return {"params": self.params, "config": self.config, "gradients": self.gradients,
"hessian": self.hessian}
def _process_pretrained_model(self, *argv, **kwargs):
pass
def get_parameter_uncertainty(self):
if (self.config['resampling_strategy'] == "jackknife") or (self.config['resampling_strategy'] == "jackknife+"):
w_query = np.ones_like(self.w_one)
resampled_params = np.zeros([self.n, self.d])
for i in np.arange(self.n):
w_query[i] = 0
resampled_params[i] = self.ij(w_query)
w_query[i] = 1
return np.cov(resampled_params), resampled_params
elif self.config['resampling_strategy'] == "bootstrap":
pass
else:
raise NotImplementedError("Only jackknife, jackknife+, and bootstrap resampling strategies are supported")
def predict(self, X, model):
"""
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
model: model object, must implement a set_parameters function
Returns:
            namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
n, _ = X.shape
y_all = model.predict(X)
_, d_out = y_all.shape
params_cov, params = self.get_parameter_uncertainty()
if d_out > 1:
print("Quantiles are computed independently for each dimension. May not be accurate.")
y = np.zeros([params.shape[0], n, d_out])
for i in np.arange(params.shape[0]):
model.set_parameters(params[i])
y[i] = model.predict(X)
y_lower = np.quantile(y, q=0.5 * self.config['alpha'], axis=0)
y_upper = np.quantile(y, q=(1. - 0.5 * self.config['alpha']), axis=0)
y_mean = y.mean(axis=0)
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
return res
def ij(self, w_query):
"""
Args:
w_query: A n*1 vector to query parameters at.
Return:
new parameters at w_query
"""
assert w_query.shape[0] == self.n
return self.params_one + self.dParams_dWeights @ (w_query-self.w_one).T
def approx_ij(self, w_query):
"""
Args:
w_query: A n*1 vector to query parameters at.
Return:
new parameters at w_query
"""
assert w_query.shape[0] == self.n
        return self.params_one + self.approx_dParams_dWeights @ (w_query-self.w_one).T
<s> import copy
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.utils.data as data_utils
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
from uq360.algorithms.builtinuq import BuiltinUQ
from uq360.models.bayesian_neural_networks.bnn_models import horseshoe_mlp, bayesian_mlp
class BnnRegression(BuiltinUQ):
"""
Variationally trained BNNs with Gaussian and Horseshoe [6]_ priors for regression.
References:
.. [6] Ghosh, Soumya, Jiayu Yao, and Finale Doshi-Velez. "Structured variational learning of Bayesian neural
networks with horseshoe priors." International Conference on Machine Learning. PMLR, 2018.
"""
def __init__(self, config, prior="Gaussian"):
"""
Args:
config: a dictionary specifying network and learning hyperparameters.
prior: BNN priors specified as a string. Supported priors are Gaussian, Hshoe, RegHshoe
"""
super(BnnRegression, self).__init__()
        self.config = config
        self.prior = prior
if prior == "Gaussian":
self.net = bayesian_mlp.BayesianRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'])
self.config['use_reg_hshoe'] = None
elif prior == "Hshoe":
self.net = horseshoe_mlp.HshoeRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'],
hshoe_scale=config['hshoe_scale'])
self.config['use_reg_hshoe'] = False
elif prior == "RegHshoe":
self.net = horseshoe_mlp.HshoeRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'],
hshoe_scale=config['hshoe_scale'],
use_reg_hshoe=config['use_reg_hshoe'])
self.config['use_reg_hshoe'] = True
else:
raise NotImplementedError("'prior' must be a string. It can be one of Gaussian, Hshoe, RegHshoe")
def get_params(self, deep=True):
return {"prior": self.prior, "config": self.config}
def fit(self, X, y):
""" Fit the BNN regression model.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
torch.manual_seed(1234)
optimizer = torch.optim.Adam(self.net.parameters(), lr=self.config['step_size'])
neg_elbo = torch.zeros([self.config['num_epochs'], 1])
params_store = {}
for epoch in range(self.config['num_epochs']):
loss = self.net.neg_elbo(num_batches=1, x=X, y=y.float().unsqueeze(dim=1)) / X.shape[0]
optimizer.zero_grad()
loss.backward()
optimizer.step()
if hasattr(self.net, 'fixed_point_updates'):
# for hshoe or regularized hshoe nets
self.net.fixed_point_updates()
neg_elbo[epoch] = loss.item()
if (epoch + 1) % 10 == 0:
# print ((net.noise_layer.bhat/net.noise_layer.ahat).data.numpy()[0])
print('Epoch[{}/{}], neg elbo: {:.6f}, noise var: {:.6f}'
.format(epoch + 1, self.config['num_epochs'], neg_elbo[epoch].item() / X.shape[0],
self.net.get_noise_var()))
params_store[epoch] = copy.deepcopy(self.net.state_dict()) # for small nets we can just store all.
best_model_id = neg_elbo.argmin() # loss_val_store.argmin() #
self.net.load_state_dict(params_store[best_model_id.item()])
return self
def predict(self, X, mc_samples=100, return_dists=False, return_epistemic=True, return_epistemic_dists=False):
"""
Obtain predictions for the test points.
In addition to the mean and lower/upper bounds, also returns epistemic uncertainty (return_epistemic=True)
and full predictive distribution (return_dists=True).
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
mc_samples: Number of Monte-Carlo samples.
return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.
return_epistemic: if True, the epistemic upper and lower bounds are returned.
return_epistemic_dists: If True, the epistemic distribution for each instance using scipy distributions
is returned.
Returns:
            namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
y_lower_epistemic: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of epistemic component of the predictive distribution of the test points.
Only returned when `return_epistemic` is True.
y_upper_epistemic: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of epistemic component of the predictive distribution of the test points.
Only returned when `return_epistemic` is True.
dists: list of predictive distribution as `scipy.stats` objects with length n_samples.
Only returned when `return_dists` is True.
"""
epistemic_out = np.zeros([mc_samples, X.shape[0]])
total_out = np.zeros([mc_samples, X.shape[0]])
for s in np.arange(mc_samples):
pred = self.net(X).data.numpy().ravel()
epistemic_out[s] = pred
total_out[s] = pred + np.sqrt(self.net.get_noise_var()) * np.random.randn(pred.shape[0])
        y_total_std = np.std(total_out, axis=0)
y_epi_std = np.std(epistemic_out, axis=0)
y_mean = np.mean(total_out, axis=0)
y_lower = y_mean - 2 * y_total_std
y_upper = y_mean + 2 * y_total_std
y_epi_lower = y_mean - 2 * y_epi_std
y_epi_upper = y_mean + 2 * y_epi_std
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
if return_epistemic:
Result = namedtuple('res', Result._fields + ('lower_epistemic', 'upper_epistemic',))
res = Result(*res, lower_epistemic=y_epi_lower, upper_epistemic=y_epi_upper)
if return_dists:
dists = [norm(loc=y_mean[i], scale=y_total_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_dists',))
res = Result(*res, y_dists=dists)
if return_epistemic_dists:
epi_dists = [norm(loc=y_mean[i], scale=y_epi_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_epistemic_dists',))
res = Result(*res, y_epistemic_dists=epi_dists)
return res
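# Illustrative usage sketch (not part of the original module): variational BNN regression with a
# Gaussian prior on a tiny synthetic dataset. fit/predict above pass X and y straight to the
# underlying network, so torch tensors are used here; all hyperparameter values are assumptions.
if __name__ == "__main__":
    torch.manual_seed(0)
    X_train = torch.linspace(-3, 3, 128).reshape(-1, 1)
    y_train = torch.sin(X_train).squeeze() + 0.2 * torch.randn(128)
    config = {"ip_dim": 1, "op_dim": 1, "num_nodes": 32, "num_layers": 2,
              "step_size": 1e-2, "num_epochs": 50}  # assumed demo values
    bnn = BnnRegression(config=config, prior="Gaussian")
    bnn.fit(X_train, y_train)
    res = bnn.predict(X_train[:5], mc_samples=20)
    print(res.y_mean, res.y_lower, res.y_upper)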
class BnnClassification(BuiltinUQ):
"""
Variationally trained BNNs with Gaussian and Horseshoe [6]_ priors for classification.
"""
def __init__(self, config, prior="Gaussian", device=None):
"""
Args:
config: a dictionary specifying network and learning hyperparameters.
prior: BNN priors specified as a string. Supported priors are Gaussian, Hshoe, RegHshoe
"""
super(BnnClassification, self).__init__()
        self.config = config
        self.prior = prior
self.device = device
if prior == "Gaussian":
self.net = bayesian_mlp.BayesianClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'])
self.config['use_reg_hshoe'] = None
elif prior == "Hshoe":
self.net = horseshoe_mlp.HshoeClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'],
hshoe_scale=config['hshoe_scale'])
self.config['use_reg_hshoe'] = False
elif prior == "RegHshoe":
self.net = horseshoe_mlp.HshoeClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
num_nodes=config['num_nodes'], num_layers=config['num_layers'],
hshoe_scale=config['hshoe_scale'],
use_reg_hshoe=config['use_reg_hshoe'])
self.config['use_reg_hshoe'] = True
else:
raise NotImplementedError("'prior' must be a string. It can be one of Gaussian, Hshoe, RegHshoe")
if "batch_size" not in self.config:
self.config["batch_size"] = 50
self.net = self.net.to(device)
def get_params(self, deep=True):
return {"prior": self.prior, "config": self.config, "device": self.device}
def fit(self, X=None, y=None, train_loader=None):
""" Fits BNN regression model.
Args:
X: array-like of shape (n_samples, n_features) or (n_samples, n_classes).
Features vectors of the training data or the probability scores from the base model.
Ignored if train_loader is not None.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Ignored if train_loader is not None.
train_loader: pytorch train_loader object.
Returns:
self
"""
if train_loader is None:
train = data_utils.TensorDataset(torch.Tensor(X), torch.Tensor(y.values).long())
train_loader = data_utils.DataLoader(train, batch_size=self.config['batch_size'], shuffle=True)
torch.manual_seed(1234)
optimizer = torch.optim.Adam(self.net.parameters(), lr=self.config['step_size'])
neg_elbo = torch.zeros([self.config['num_epochs'], 1])
params_store = {}
for epoch in range(self.config['num_epochs']):
avg_loss = 0.0
for batch_x, batch_y in train_loader:
loss = self.net.neg_elbo(num_batches=len(train_loader), x=batch_x, y=batch_y) / batch_x.size(0)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if hasattr(self.net, 'fixed_point_updates'):
# for hshoe or regularized hshoe nets
self.net.fixed_point_updates()
avg_loss += loss.item()
neg_elbo[epoch] = avg_loss / len(train_loader)
if (epoch + 1) % 10 == 0:
# print ((net.noise_layer.bhat/net.noise_layer.ahat).data.numpy()[0])
print('Epoch[{}/{}], neg elbo: {:.6f}'
.format(epoch + 1, self.config['num_epochs'], neg_elbo[epoch].item()))
params_store[epoch] = copy.deepcopy(self.net.state_dict()) # for small nets we can just store all.
best_model_id = neg_elbo.argmin() # loss_val_store.argmin() #
self.net.load_state_dict(params_store[best_model_id.item()])
return self
def predict(self, X, mc_samples=100):
"""
Obtain calibrated predictions for the test points.
Args:
X: array-like of shape (n_samples, n_features) or (n_samples, n_classes).
Features vectors of the training data or the probability scores from the base model.
mc_samples: Number of Monte-Carlo samples.
Returns:
            namedtuple: A namedtuple that holds
y_pred: ndarray of shape (n_samples,)
Predicted labels of the test points.
y_prob: ndarray of shape (n_samples, n_classes)
Predicted probability scores of the classes.
y_prob_var: ndarray of shape (n_samples,)
Variance of the prediction on the test points.
y_prob_samples: ndarray of shape (mc_samples, n_samples, n_classes)
Samples from the predictive distribution.
"""
X = torch.Tensor(X)
y_prob_samples = [F.softmax(self.net(X), dim=1).detach().numpy() for _ in np.arange(mc_samples)]
y_prob_samples_stacked = np.stack(y_prob_samples)
prob_mean = np.mean(y_prob_samples_stacked, 0)
prob_var = np.std(y_prob_samples_stacked, 0) ** 2
if len(np.shape(prob_mean)) == 1:
y_pred_labels = prob_mean > 0.5
else:
y_pred_labels = np.argmax(prob_mean, axis=1)
Result = namedtuple('res', ['y_pred', 'y_prob', 'y_prob_var', 'y_prob_samples'])
res = Result(y_pred_labels, prob_mean, prob_var, y_prob_samples)
return res
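# Illustrative usage sketch (not part of the original module): BNN classification with a Gaussian
# prior on synthetic two-class data. A DataLoader is built explicitly and passed via `train_loader`
# (the X/y path of fit above expects a pandas Series for y, since it calls y.values). All
# hyperparameter values are assumptions for demonstration.
if __name__ == "__main__":
    torch.manual_seed(0)
    X_train = torch.randn(200, 2)
    y_train = (X_train[:, 0] + X_train[:, 1] > 0).long()
    train = data_utils.TensorDataset(X_train, y_train)
    loader = data_utils.DataLoader(train, batch_size=32, shuffle=True)
    config = {"ip_dim": 2, "op_dim": 2, "num_nodes": 32, "num_layers": 2,
              "step_size": 1e-2, "num_epochs": 30}  # assumed demo values
    clf = BnnClassification(config=config, prior="Gaussian", device="cpu")
    clf.fit(train_loader=loader)
    res = clf.predict(X_train[:5].numpy(), mc_samples=20)
    print(res.y_pred, res.y_prob)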
<s> import inspect
from collections import namedtuple
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.exceptions import NotFittedError
from uq360.algorithms.posthocuq import PostHocUQ
class BlackboxMetamodelClassification(PostHocUQ):
""" Extracts confidence scores from black-box classification models using a meta-model [4]_ .
References:
.. [4] Chen, Tongfei, et al. "Confidence scoring using whitebox meta-models with linear classifier probes."
The 22nd International Conference on Artificial Intelligence and Statistics. PMLR, 2019.
"""
def _create_named_model(self, mdltype, config):
""" Instantiates a model by name passed in 'mdltype'.
Args:
mdltype: string with name (must be supported)
config: dict with args passed in the instantiation call
Returns:
mdl instance
"""
assert (isinstance(mdltype, str))
if mdltype == 'lr':
mdl = LogisticRegression(**config)
elif mdltype == 'gbm':
mdl = GradientBoostingClassifier(**config)
else:
raise NotImplementedError("ERROR: Requested model type unknown: \\"%s\\"" % mdltype)
return mdl
def _get_model_instance(self, model, config):
""" Returns an instance of a model based on (a) a desired name or (b) passed in class, or
(c) passed in instance.
:param model: string, class, or instance. Class and instance must have certain methods callable.
:param config: dict with args passed in during the instantiation
:return: model instance
"""
assert (model is not None and config is not None)
if isinstance(model, str): # 'model' is a name, create it
mdl = self._create_named_model(model, config)
elif inspect.isclass(model): # 'model' is a class, instantiate it
mdl = model(**config)
else: # 'model' is an instance, register it
mdl = model
if not all([hasattr(mdl, key) and callable(getattr(mdl, key)) for key in self.callable_keys]):
raise ValueError("ERROR: Passed model/method failed the interface test. Methods required: %s" %
','.join(self.callable_keys))
return mdl
def __init__(self, base_model=None, meta_model=None, base_config=None, meta_config=None, random_seed=42):
"""
:param base_model: Base model. Can be:
(1) None (default mdl will be set up),
(2) Named model (e.g., logistic regression 'lr' or gradient boosting machine 'gbm'),
(3) Base model class declaration (e.g., sklearn.linear_model.LogisticRegression). Will instantiate.
(4) Model instance (instantiated outside). Will be re-used. Must have certain callable methods.
Note: user-supplied classes and models must have certain callable methods ('predict', 'fit')
and be capable of raising NotFittedError.
:param meta_model: Meta model. Same values possible as with 'base_model'
:param base_config: None or a params dict to be passed to 'base_model' at instantiation
:param meta_config: None or a params dict to be passed to 'meta_model' at instantiation
:param random_seed: seed used in the various pipeline steps
"""
        super(BlackboxMetamodelClassification, self).__init__()
self.random_seed = random_seed
self.callable_keys = ['predict', 'fit'] # required methods - must be present in models passed in
self.base_model_default = 'gbm'
self.meta_model_default = 'lr'
self.base_config_default = {'n_estimators': 300, 'max_depth': 10,
'learning_rate': 0.001, 'min_samples_leaf': 10, 'min_samples_split': 10,
'random_state': self.random_seed}
self.meta_config_default = {'penalty': 'l1', 'C': 1, 'solver': 'liblinear', 'random_state': self.random_seed}
self.base_config = base_config if base_config is not None else self.base_config_default
self.meta_config = meta_config if meta_config is not None else self.meta_config_default
self.base_model = None
self.meta_model = None
self.base_model = self._get_model_instance(base_model if base_model is not None else self.base_model_default,
self.base_config)
self.meta_model = self._get_model_instance(meta_model if meta_model is not None else self.meta_model_default,
self.meta_config)
def get_params(self, deep=True):
return {"base_model": self.base_model, "meta_model": self.meta_model, "base_config": self.base_config,
"meta_config": self.meta_config, "random_seed": self.random_seed}
def _process_pretrained_model(self, X, y_hat_proba):
"""
Given the original input features and the base output probabilities, generate input features
to train a meta model. Current implementation copies all input features and appends.
:param X: numpy [nsamples, dim]
:param y_hat_proba: [nsamples, nclasses]
:return: array with new features [nsamples, newdim]
"""
assert (len(y_hat_proba.shape) == 2)
assert (X.shape[0] == y_hat_proba.shape[0])
# sort the probs sample by sample
faux1 = np.sort(y_hat_proba, axis=-1)
# add delta between top and second candidate
faux2 = np.expand_dims(faux1[:, -1] - faux1[:, -2], axis=-1)
return np.hstack([X, faux1, faux2])
def fit(self, X, y, meta_fraction=0.2, randomize_samples=True, base_is_prefitted=False,
meta_train_data=(None, None)):
"""
Fit base and meta models.
:param X: input to the base model,
array-like of shape (n_samples, n_features).
Features vectors of the training data.
:param y: ground truth for the base model,
array-like of shape (n_samples,)
:param meta_fraction: float in [0,1] - a fractional size of the partition carved out to train the meta model
(complement will be used to train the base model)
        :param randomize_samples: use shuffling when creating partitions
        :param base_is_prefitted: Setting True will skip fitting the base model (useful for base models that have been
            instantiated outside/by the user and are already fitted).
:param meta_train_data: User supplied data to train the meta model. Note that this option should only be used
with 'base_is_prefitted'==True. Pass a tuple meta_train_data=(X_meta, y_meta) to activate.
Note that (X,y,meta_fraction, randomize_samples) will be ignored in this mode.
:return: self
"""
X = np.asarray(X)
y = np.asarray(y)
assert (len(meta_train_data) == 2)
if meta_train_data[0] is None:
X_base, X_meta, y_base, y_meta = train_test_split(X, y, shuffle=randomize_samples, test_size=meta_fraction,
random_state=self.random_seed)
else:
if not base_is_prefitted:
raise ValueError("ERROR: fit(): base model must be pre-fitted to use the 'meta_train_data' option")
X_base = y_base = None
X_meta = meta_train_data[0]
y_meta = meta_train_data[1]
# fit the base model
if not base_is_prefitted:
self.base_model.fit(X_base, y_base)
# get input for the meta model from the base
try:
y_hat_meta_proba = self.base_model.predict_proba(X_meta)
# determine correct-incorrect outcome - these are targets for the meta model trainer
# y_hat_meta_targets = np.asarray((y_meta == np.argmax(y_hat_meta_proba, axis=-1)), dtype=np.int) -- Fix for python 3.8.11 update (in 2.9.0.8)
y_hat_meta_targets = np.asarray((y_meta == np.argmax(y_hat_meta_proba, axis=-1)), dtype=int)
except NotFittedError as e:
raise RuntimeError("ERROR: fit(): The base model appears not pre-fitted (%s)" % repr(e))
# get input features for meta training
X_meta_in = self._process_pretrained_model(X_meta, y_hat_meta_proba)
# train meta model to predict 'correct' vs. 'incorrect' of the base
self.meta_model.fit(X_meta_in, y_hat_meta_targets)
return self
def predict(self, X):
"""
Generate a base prediction along with uncertainty/confidence for data X.
:param X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
:return: namedtuple: A namedtuple that holds
y_pred: ndarray of shape (n_samples,)
Predicted labels of the test points.
                y_score: ndarray of shape (n_samples,)
                    Confidence score of the test points.
"""
y_hat_proba = self.base_model.predict_proba(X)
y_hat = np.argmax(y_hat_proba, axis=-1)
X_meta_in = self._process_pretrained_model(X, y_hat_proba)
z_hat = self.meta_model.predict_proba(X_meta_in)
index_of_class_1 = np.where(self.meta_model.classes_ == 1)[0][0] # class 1 corresponds to probab of positive/correct outcome
Result = namedtuple('res', ['y_pred', 'y_score'])
res = Result(y_hat, z_hat[:, index_of_class_1])
return res
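# Illustrative usage sketch (not part of the original module): wraps the default GBM base model and
# LR meta-model around a synthetic binary classification problem. The lighter base_config below is
# an assumption to keep the demo fast; any GradientBoostingClassifier keyword arguments would work.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=500, n_features=10, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
    meta_clf = BlackboxMetamodelClassification(base_config={"n_estimators": 50, "max_depth": 3,
                                                            "random_state": 42})
    meta_clf.fit(X_train, y_train, meta_fraction=0.2)
    res = meta_clf.predict(X_test[:5])
    print(res.y_pred, res.y_score)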
<s> from .blackbox_metamodel_regression import BlackboxMetamodelRegression
from .blackbox_metamodel_classification import BlackboxMetamodelClassification
<s> import inspect
from collections import namedtuple
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import train_test_split
from sklearn.exceptions import NotFittedError
from uq360.algorithms.posthocuq import PostHocUQ
class BlackboxMetamodelRegression(PostHocUQ):
""" Extracts confidence scores from black-box regression models using a meta-model [2]_ .
References:
.. [2] Chen, Tongfei, et al. Confidence scoring using whitebox meta-models with linear classifier probes.
The 22nd International Conference on Artificial Intelligence and Statistics. PMLR, 2019.
"""
def _create_named_model(self, mdltype, config):
"""
Instantiates a model by name passed in 'mdltype'
        :param mdltype: string with name (must be supported)
:param config: dict with args passed in the instantiation call
:return: mdl instance
"""
assert (isinstance(mdltype, str))
if mdltype == 'gbr':
mdl = GradientBoostingRegressor(**config)
else:
raise NotImplementedError("ERROR: Requested model type unknown: \\"%s\\"" % mdltype)
return mdl
def _get_model_instance(self, model, config):
"""
Returns an instance of a model based on (a) a desired name or (b) passed in class, or
(c) passed in instance
:param model: string, class, or instance. Class and instance must have certain methods callable.
:param config: dict with args passed in during the instantiation
:return: model instance
"""
assert (model is not None and config is not None)
if isinstance(model, str): # 'model' is a name, create it
mdl = self._create_named_model(model, config)
elif inspect.isclass(model): # 'model' is a class, instantiate it
mdl = model(**config)
else: # 'model' is an instance, register it
mdl = model
if not all([hasattr(mdl, key) and callable(getattr(mdl, key)) for key in self.callable_keys]):
raise ValueError("ERROR: Passed model/method failed the interface test. Methods required: %s" %
','.join(self.callable_keys))
return mdl
def __init__(self, base_model=None, meta_model=None, base_config=None, meta_config=None, random_seed=42):
"""
:param base_model: Base model. Can be:
(1) None (default mdl will be set up),
(2) Named model (e.g., 'gbr'),
            (3) Base model class declaration (e.g., sklearn.linear_model.LinearRegression). Will instantiate.
(4) Model instance (instantiated outside). Will be re-used. Must have required callable methods.
Note: user-supplied classes and models must have certain callable methods ('predict', 'fit')
and be capable of raising NotFittedError.
:param meta_model: Meta model. Same values possible as with 'base_model'
:param base_config: None or a params dict to be passed to 'base_model' at instantiation
:param meta_config: None or a params dict to be passed to 'meta_model' at instantiation
:param random_seed: seed used in the various pipeline steps
"""
        super(BlackboxMetamodelRegression, self).__init__()
self.random_seed = random_seed
self.callable_keys = ['predict', 'fit'] # required methods - must be present in models passed in
self.base_model_default = 'gbr'
self.meta_model_default = 'gbr'
self.base_config_default = {'loss': 'ls', 'n_estimators': 300, 'max_depth': 10, 'learning_rate': 0.001,
'min_samples_leaf': 10, 'min_samples_split': 10, 'random_state': self.random_seed}
self.meta_config_default = {'loss': 'quantile', 'alpha': 0.95, 'n_estimators': 300, 'max_depth': 10,
'learning_rate': 0.001, 'min_samples_leaf': 10, 'min_samples_split': 10,
'random_state': self.random_seed}
self.base_config = base_config if base_config is not None else self.base_config_default
self.meta_config = meta_config if meta_config is not None else self.meta_config_default
self.base_model = None
self.meta_model = None
self.base_model = self._get_model_instance(base_model if base_model is not None else self.base_model_default,
self.base_config)
self.meta_model = self._get_model_instance(meta_model if meta_model is not None else self.meta_model_default,
self.meta_config)
def get_params(self, deep=True):
return {"base_model": self.base_model, "meta_model": self.meta_model, "base_config": self.base_config,
"meta_config": self.meta_config, "random_seed": self.random_seed}
def fit(self, X, y, meta_fraction=0.2, randomize_samples=True, base_is_prefitted=False,
meta_train_data=(None, None)):
"""
Fit base and meta models.
:param X: input to the base model
:param y: ground truth for the base model
:param meta_fraction: float in [0,1] - a fractional size of the partition carved out to train the meta model
(complement will be used to train the base model)
:param randomize_samples: use shuffling when creating partitions
        :param base_is_prefitted: Setting True will skip fitting the base model (useful for base models that have been
            instantiated outside/by the user and are already fitted).
:param meta_train_data: User supplied data to train the meta model. Note that this option should only be used
with 'base_is_prefitted'==True. Pass a tuple meta_train_data=(X_meta, y_meta) to activate.
Note that (X,y,meta_fraction, randomize_samples) will be ignored in this mode.
:return: self
"""
X = np.asarray(X)
y = np.asarray(y)
assert(len(meta_train_data)==2)
if meta_train_data[0] is None:
X_base, X_meta, y_base, y_meta = train_test_split(X, y, shuffle=randomize_samples, test_size=meta_fraction,
random_state=self.random_seed)
else:
if not base_is_prefitted:
raise ValueError("ERROR: fit(): base model must be pre-fitted to use the 'meta_train_data' option")
X_base = y_base = None
X_meta = meta_train_data[0]
y_meta = meta_train_data[1]
# fit the base model
if not base_is_prefitted:
self.base_model.fit(X_base, y_base)
# get input for the meta model from the base
try:
y_hat_meta = self.base_model.predict(X_meta)
except NotFittedError as e:
raise RuntimeError("ERROR: fit(): The base model appears not pre-fitted (%s)" % repr(e))
# used base input and output as meta input
X_meta_in = self._process_pretrained_model(X_meta, y_hat_meta)
# train meta model to predict abs diff
self.meta_model.fit(X_meta_in, np.abs(y_hat_meta - y_meta))
return self
def _process_pretrained_model(self, X, y_hat):
"""
Given the original input features and the base output probabilities, generate input features
to train a meta model. Current implementation copies all input features and appends.
:param X: numpy [nsamples, dim]
:param y_hat: [nsamples,]
:return: array with new features [nsamples, newdim]
"""
y_hat_meta_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat
X_meta_in = np.hstack([X, y_hat_meta_prime])
return X_meta_in
def predict(self, X):
"""
Generate prediction and uncertainty bounds for data X.
:param X: input features
:return: namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
y_hat = self.base_model.predict(X)
y_hat_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat
X_meta_in = np.hstack([X, y_hat_prime])
z_hat = self.meta_model.predict(X_meta_in)
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_hat, y_hat - z_hat, y_hat + z_hat)
return res
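# Illustrative usage sketch (not part of the original module): meta-model based uncertainty for a
# synthetic regression problem. Lightweight base/meta configs are passed explicitly as an assumption
# (the module defaults use 'loss': 'ls' and a quantile-loss meta-model, which older scikit-learn
# versions expect); here the meta-model simply regresses the absolute residual.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = rng.uniform(-3, 3, size=(500, 4))
    y = X[:, 0] + np.sin(X[:, 1]) + 0.1 * rng.randn(500)
    base_cfg = {"n_estimators": 100, "max_depth": 3, "random_state": 42}  # assumed demo values
    meta_cfg = {"n_estimators": 100, "max_depth": 3, "random_state": 42}
    reg = BlackboxMetamodelRegression(base_config=base_cfg, meta_config=meta_cfg)
    reg.fit(X, y, meta_fraction=0.2)
    res = reg.predict(X[:5])
    print(res.y_mean, res.y_lower, res.y_upper)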
<s> from .quantile_regression import QuantileRegression
<s> from collections import namedtuple
from sklearn.ensemble import GradientBoostingRegressor
from uq360.algorithms.builtinuq import BuiltinUQ
class QuantileRegression(BuiltinUQ):
"""Quantile Regression uses quantile loss and learns two separate models for the upper and lower quantile
to obtain the prediction intervals.
"""
def __init__(self, model_type="gbr", config=None):
"""
Args:
model_type: The base model used for predicting a quantile. Currently supported values are [gbr].
gbr is sklearn GradientBoostingRegressor.
config: dictionary containing the config parameters for the model.
"""
        super(QuantileRegression, self).__init__()
if config is not None:
self.config = config
else:
self.config = {}
if "alpha" not in self.config:
self.config["alpha"] = 0.95
if model_type == "gbr":
self.model_type = model_type
self.model_mean = GradientBoostingRegressor(
loss='ls',
n_estimators=self.config["n_estimators"],
max_depth=self.config["max_depth"],
learning_rate=self.config["learning_rate"],
min_samples_leaf=self.config["min_samples_leaf"],
min_samples_split=self.config["min_samples_split"]
)
self.model_upper = GradientBoostingRegressor(
loss='quantile',
alpha=self.config["alpha"],
n_estimators=self.config["n_estimators"],
max_depth=self.config["max_depth"],
                learning_rate=self.config["learning_rate"],
                min_samples_leaf=self.config["min_samples_leaf"],
min_samples_split=self.config["min_samples_split"]
)
self.model_lower = GradientBoostingRegressor(
loss='quantile',
alpha=1.0 - self.config["alpha"],
n_estimators=self.config["n_estimators"],
max_depth=self.config["max_depth"],
learning_rate=self.config["learning_rate"],
min_samples_leaf=self.config["min_samples_leaf"],
min_samples_split=self.config["min_samples_split"])
else:
raise NotImplementedError
def get_params(self, deep=True):
return {"model_type": self.model_type, "config": self.config}
def fit(self, X, y):
""" Fit the Quantile Regression model.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
self.model_mean.fit(X, y)
self.model_lower.fit(X, y)
self.model_upper.fit(X, y)
return self
def predict(self, X):
"""
Obtain predictions for the test points.
        Returns the mean prediction along with the lower/upper quantile bounds.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
Returns:
            namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
y_mean = self.model_mean.predict(X)
y_lower = self.model_lower.predict(X)
y_upper = self.model_upper.predict(X)
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
return res
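# Illustrative usage sketch (not part of the original module): quantile-regression intervals on a
# synthetic heteroscedastic dataset. The config covers every key read by __init__ above; the values
# are assumptions for demonstration. Note that the mean model above uses loss='ls', which is only
# accepted by older scikit-learn releases.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.uniform(-3, 3, size=(500, 1))
    y = np.sin(X[:, 0]) + (0.1 + 0.3 * np.abs(X[:, 0])) * rng.randn(500)
    config = {"n_estimators": 200, "max_depth": 3, "learning_rate": 0.05,
              "min_samples_leaf": 5, "min_samples_split": 10, "alpha": 0.9}  # assumed demo values
    qr = QuantileRegression(model_type="gbr", config=config)
    qr.fit(X, y)
    res = qr.predict(X[:5])
    print(res.y_mean, res.y_lower, res.y_upper)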
<s> from collections import namedtuple
import botorch
import gpytorch
import numpy as np
import torch
from botorch.models import SingleTaskGP
from botorch.utils.transforms import normalize
from gpytorch.constraints import GreaterThan
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
from uq360.algorithms.builtinuq import BuiltinUQ
np.random.seed(42)
torch.manual_seed(42)
class HomoscedasticGPRegression(BuiltinUQ):
""" A wrapper around Botorch SingleTask Gaussian Process Regression [1]_ with homoscedastic noise.
References:
.. [1] https://botorch.org/api/models.html#singletaskgp
"""
def __init__(self,
kernel=gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel()),
likelihood=None,
config=None):
"""
Args:
kernel: gpytorch kernel function with default set to `RBFKernel` with output scale.
likelihood: gpytorch likelihood function with default set to `GaussianLikelihood`.
config: dictionary containing the config parameters for the model.
"""
        super(HomoscedasticGPRegression, self).__init__()
self.config = config
self.kernel = kernel
self.likelihood = likelihood
self.model = None
self.scaler = StandardScaler()
self.X_bounds = None
def get_params(self, deep=True):
return {"kernel": self.kernel, "likelihood": self.likelihood, "config": self.config}
def fit(self, X, y, **kwargs):
"""
Fit the GP Regression model.
Additional arguments relevant for SingleTaskGP fitting can be passed to this function.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the training data.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
**kwargs: Additional arguments relevant for SingleTaskGP fitting.
Returns:
self
"""
y = self.scaler.fit_transform(y)
X, y = torch.tensor(X), torch.tensor(y)
self.X_bounds = X_bounds = torch.stack([X.min() * torch.ones(X.shape[1]),
X.max() * torch.ones(X.shape[1])])
X = normalize(X, X_bounds)
model_homo = SingleTaskGP(train_X=X, train_Y=y, covar_module=self.kernel, likelihood=self.likelihood, **kwargs)
model_homo.likelihood.noise_covar.register_constraint("raw_noise", GreaterThan(1e-5))
model_homo_marginal_log_lik = gpytorch.mlls.ExactMarginalLogLikelihood(model_homo.likelihood, model_homo)
botorch.fit.fit_gpytorch_model(model_homo_marginal_log_lik)
model_homo_marginal_log_lik.eval()
self.model = model_homo_marginal_log_lik
self.inferred_observation_noise = self.scaler.inverse_transform(self.model.likelihood.noise.detach().numpy()[0].reshape(1,1)).squeeze()
return self
def predict(self, X, return_dists=False, return_epistemic=False, return_epistemic_dists=False):
"""
Obtain predictions for the test points.
In addition to the mean and lower/upper bounds, also returns epistemic uncertainty (return_epistemic=True)
and full predictive distribution (return_dists=True).
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.
return_epistemic: if True, the epistemic upper and lower bounds are returned.
return_epistemic_dists: If True, the epistemic distribution for each instance using scipy distributions
is returned.
Returns:
namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
y_lower_epistemic: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of epistemic component of the predictive distribution of the test points.
Only returned when `return_epistemic` is True.
y_upper_epistemic: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of epistemic component of the predictive distribution of the test points.
Only returned when `return_epistemic` is True.
dists: list of predictive distribution as `scipy.stats` objects with length n_samples.
Only returned when `return_dists` is True.
"""
X = torch.tensor(X)
X_test_norm = normalize(X, self.X_bounds)
self.model.eval()
with torch.no_grad():
posterior = self.model.model.posterior(X_test_norm)
y_mean = posterior.mean
#y_epi_std = torch.sqrt(posterior.variance)
y_lower_epistemic, y_upper_epistemic = posterior.mvn.confidence_region()
predictive_posterior = self.model.model.posterior(X_test_norm, observation_noise=True)
#y_std = torch.sqrt(predictive_posterior.variance)
y_lower_total, y_upper_total = predictive_posterior.mvn.confidence_region()
        y_mean, y_lower, y_upper, y_lower_epistemic, y_upper_epistemic = self.scaler.inverse_transform(y_mean.numpy()).squeeze(), \
            self.scaler.inverse_transform(y_lower_total.numpy()).squeeze(), \
            self.scaler.inverse_transform(y_upper_total.numpy()).squeeze(), \
            self.scaler.inverse_transform(y_lower_epistemic.numpy()).squeeze(), \
            self.scaler.inverse_transform(y_upper_epistemic.numpy()).squeeze()
y_epi_std = (y_upper_epistemic - y_lower_epistemic) / 4.0
y_std = (y_upper_total - y_lower_total) / 4.0
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_mean, y_lower, y_upper)
if return_epistemic:
Result = namedtuple('res', Result._fields + ('y_lower_epistemic', 'y_upper_epistemic',))
res = Result(*res, y_lower_epistemic=y_lower_epistemic, y_upper_epistemic=y_upper_epistemic)
if return_dists:
dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_dists',))
res = Result(*res, y_dists=dists)
if return_epistemic_dists:
epi_dists = [norm(loc=y_mean[i], scale=y_epi_std[i]) for i in range(y_mean.shape[0])]
Result = namedtuple('res', Result._fields + ('y_epistemic_dists',))
res = Result(*res, y_epistemic_dists=epi_dists)
return res
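# Illustrative usage sketch (not part of the original module): exact GP regression with
# homoscedastic noise on a 1-D synthetic dataset. Requires botorch/gpytorch as imported above;
# the data and the 2-D column shape of y (expected by the StandardScaler) are assumptions for
# demonstration.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_train = rng.uniform(-3, 3, size=(60, 1))
    y_train = np.sin(X_train) + 0.2 * rng.randn(60, 1)
    gp = HomoscedasticGPRegression()
    gp.fit(X_train, y_train)
    res = gp.predict(X_train[:5], return_epistemic=True)
    print(res.y_mean, res.y_lower, res.y_upper)
    print("inferred observation noise:", gp.inferred_observation_noise)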
<s> from .homoscedastic_gaussian_process_regression import HomoscedasticGPRegression
<s> from .ucc_recalibration import UCCRecalibration
<s> from collections import namedtuple
from uq360.algorithms.posthocuq import PostHocUQ
from uq360.utils.misc import form_D_for_auucc
from uq360.metrics.uncertainty_characteristics_curve.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve
class UCCRecalibration(PostHocUQ):
""" Recalibration a regression model to specified operating point using Uncertainty Characteristics Curve.
"""
def __init__(self, base_model):
"""
Args:
base_model: pretrained model to be recalibrated.
"""
        super(UCCRecalibration, self).__init__()
self.base_model = self._process_pretrained_model(base_model)
self.ucc = None
def get_params(self, deep=True):
return {"base_model": self.base_model}
def _process_pretrained_model(self, base_model):
return base_model
def fit(self, X, y):
"""
Fit the Uncertainty Characteristics Curve.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
y_pred_mean, y_pred_lower, y_pred_upper = self.base_model.predict(X)[:3]
bwu = y_pred_upper - y_pred_mean
bwl = y_pred_mean - y_pred_lower
self.ucc = UncertaintyCharacteristicsCurve()
self.ucc.fit(form_D_for_auucc(y_pred_mean, bwl, bwu), y.squeeze())
return self
def predict(self, X, missrate=0.05):
"""
Generate prediction and uncertainty bounds for data X.
Args:
X: array-like of shape (n_samples, n_features).
Features vectors of the test points.
missrate: desired missrate of the new operating point, set to 0.05 by default.
Returns:
            namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
C = self.ucc.get_specific_operating_point(req_y_axis_value=missrate, vary_bias=False)
new_scale = C['modvalue']
y_pred_mean, y_pred_lower, y_pred_upper = self.base_model.predict(X)[:3]
bwu = y_pred_upper - y_pred_mean
bwl = y_pred_mean - y_pred_lower
if C['operation'] == 'bias':
            calib_y_pred_upper = y_pred_mean + (new_scale + bwu)  # upper bound width
            calib_y_pred_lower = y_pred_mean - (new_scale + bwl)  # lower bound width
        else:
            calib_y_pred_upper = y_pred_mean + (new_scale * bwu)  # upper bound width
            calib_y_pred_lower = y_pred_mean - (new_scale * bwl)  # lower bound width
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_pred_mean, calib_y_pred_lower, calib_y_pred_upper)
return res
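# Illustrative usage sketch (not part of the original module): recalibrates a toy base model whose
# predict() returns (mean, lower, upper), as fit/predict above expect. The toy model and the
# synthetic data are assumptions standing in for any pretrained uncertainty-aware regressor.
if __name__ == "__main__":
    import numpy as np
    class _ConstantBandModel:
        # stands in for a pretrained model; returns a fixed +/- 1 band around a linear mean
        def predict(self, X):
            mean = X[:, 0]
            return mean, mean - 1.0, mean + 1.0
    rng = np.random.RandomState(0)
    X_calib = rng.uniform(-3, 3, size=(200, 1))
    y_calib = X_calib[:, 0] + 0.5 * rng.randn(200)
    recal = UCCRecalibration(base_model=_ConstantBandModel())
    recal.fit(X_calib, y_calib)
    res = recal.predict(X_calib[:5], missrate=0.05)
    print(res.y_mean, res.y_lower, res.y_upper)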
<s> from collections import namedtuple
import numpy as np
from sklearn.calibration import CalibratedClassifierCV
from sklearn.preprocessing import LabelEncoder
from uq360.utils.misc import DummySklearnEstimator
from uq360.algorithms.posthocuq import PostHocUQ
class ClassificationCalibration(PostHocUQ):
"""Post hoc calibration of classification models. Currently wraps `CalibratedClassifierCV` from sklearn and allows
non-sklearn models to be calibrated |
.
"""
def __init__(self, num_classes, fit_mode="features", method='isotonic', base_model_prediction_func=None):
"""
Args:
num_classes: number of classes.
            fit_mode: features or probs. If probs, `fit` and `predict` operate on the base model's probability scores,
                useful when these are precomputed.
method: isotonic or sigmoid.
base_model_prediction_func: the function that takes in the input features and produces base model's
probability scores. This is ignored when operating in `probs` mode.
"""
        super(ClassificationCalibration, self).__init__()
if fit_mode == "probs":
# In this case, the fit assumes that it receives the probability scores of the base model.
# create a dummy estimator
self.base_model = DummySklearnEstimator(num_classes, lambda x: x)
else:
            self.base_model = DummySklearnEstimator(num_classes, base_model_prediction_func)
        self.num_classes = num_classes
        self.fit_mode = fit_mode
        self.method = method
        self.base_model_prediction_func = base_model_prediction_func
def get_params(self, deep=True):
return {"num_classes": self.num_classes, "fit_mode": self.fit_mode, "method": self.method,
"base_model_prediction_func": self.base_model_prediction_func}
def _process_pretrained_model(self, base_model):
return base_model
def fit(self, X, y):
""" Fits calibration model using the provided calibration set.
Args:
X: array-like of shape (n_samples, n_features) or (n_samples, n_classes).
Features vectors of the training data or the probability scores from the base model.
y: array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns:
self
"""
self.base_model.label_encoder_ = LabelEncoder().fit(y)
self.calib_model = CalibratedClassifierCV(base_estimator=self.base_model,
cv="prefit",
method=self.method)
self.calib_model.fit(X, y)
return self
def predict(self, X):
"""
Obtain calibrated predictions for the test points.
Args:
X: array-like of shape (n_samples, n_features) or (n_samples, n_classes).
Features vectors of the training data or the probability scores from the base model.
Returns:
            namedtuple: A namedtuple that holds
y_pred: ndarray of shape (n_samples,)
Predicted labels of the test points.
y_prob: ndarray of shape (n_samples, n_classes)
Predicted probability scores of the classes.
"""
y_prob = self.calib_model.predict_proba(X)
if len(np.shape(y_prob)) == 1:
y_pred_labels = y_prob > 0.5
else:
y_pred_labels = np.argmax(y_prob, axis=1)
Result = namedtuple('res', ['y_pred', 'y_prob'])
res = Result(y_pred_labels, y_prob)
return res
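# Illustrative usage sketch (not part of the original module): isotonic recalibration of the
# probability scores of a simple scikit-learn classifier, using fit_mode="probs" so that
# fit/predict above operate directly on precomputed scores. Data, model and split sizes are
# assumptions for demonstration.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import train_test_split
    X, y = make_classification(n_samples=600, n_features=10, random_state=0)
    X_train, X_hold, y_train, y_hold = train_test_split(X, y, test_size=0.5, random_state=0)
    base = LogisticRegression(max_iter=1000).fit(X_train, y_train)
    calib = ClassificationCalibration(num_classes=2, fit_mode="probs", method="isotonic")
    calib.fit(base.predict_proba(X_hold), y_hold)
    res = calib.predict(base.predict_proba(X_hold[:5]))
    print(res.y_pred, res.y_prob)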
<s> from .classification_calibration import ClassificationCalibration
<s> import numpy as np
from scipy.stats import norm
from sklearn.metrics import mean_squared_error, r2_score
from ..utils.misc import fitted_ucc_w_nullref
def picp(y_true, y_lower, y_upper):
"""
    Prediction Interval Coverage Probability (PICP). Computes the fraction of samples for which the ground truth lies
    within the predicted interval. Measures the prediction interval calibration for regression.
Args:
y_true: Ground truth
y_lower: predicted lower bound
y_upper: predicted upper bound
Returns:
        float: the fraction of samples for which the ground truth lies within the predicted interval.
"""
satisfies_upper_bound = y_true <= y_upper
satisfies_lower_bound = y_true >= y_lower
return np.mean(satisfies_upper_bound * satisfies_lower_bound)
def mpiw(y_lower, y_upper):
"""
    Mean Prediction Interval Width (MPIW). Computes the average width of the prediction intervals. Measures the
    sharpness of the intervals.
Args:
y_lower: predicted lower bound
y_upper: predicted upper bound
Returns:
        float: the average width of the prediction interval across samples.
"""
return np.mean(np.abs(y_lower - y_upper))
def auucc_gain(y_true, y_mean, y_lower, y_upper):
""" Computes the Area Under the Uncertainty Characteristics Curve (AUUCC) gain wrt to a null reference
with constant band.
Args:
y_true: Ground truth
y_mean: predicted mean
y_lower: predicted lower bound
y_upper: predicted upper bound
Returns:
float: AUUCC gain
"""
u = fitted_ucc_w_nullref(y_true, y_mean, y_lower, y_upper)
auucc = u.get_AUUCC()
assert(isinstance(auucc, list) and len(auucc) == 2), "Failed to calculate auucc gain"
assert (not np.isclose(auucc[1], 0.)), "Failed to calculate auucc gain"
auucc_gain = (auucc[1]-auucc[0])/auucc[0]
return auucc_gain
def negative_log_likelihood_Gaussian(y_true, y_mean, y_lower, y_upper):
""" Computes Gaussian negative_log_likelihood assuming symmetric band around the mean.
Args:
y_true: Ground truth
y_mean: predicted mean
y_lower: predicted lower bound
y_upper: predicted upper bound
Returns:
float: nll
"""
y_std = (y_upper - y_lower) / 4.0
nll = np.mean(-norm.logpdf(y_true.squeeze(), loc=y_mean.squeeze(), scale=y_std.squeeze()))
return nll
def compute_regression_metrics(y_true, y_mean, y_lower, y_upper, option="all", nll_fn=None):
"""
Computes the metrics specified in the option which can be string or a list of strings. Default option `all` computes
the ["rmse", "nll", "auucc_gain", "picp", "mpiw", "r2"] metrics.
Args:
y_true: Ground truth
y_mean: predicted mean
y_lower: predicted lower bound
y_upper: predicted upper bound
option: string or list of strings containing the names of the metrics to be computed.
nll_fn: function that evaluates NLL; if None, a Gaussian NLL is computed from y_mean, y_lower, and y_upper.
Returns:
dict: dictionary containing the computed metrics.
"""
assert y_true.shape == y_mean.shape, "y_true shape: {}, y_mean shape: {}".format(y_true.shape, y_mean.shape)
assert y_true.shape == y_lower.shape, "y_true shape: {}, y_lower shape: {}".format(y_true.shape, y_lower.shape)
assert y_true.shape == y_upper.shape, "y_true shape: {}, y_upper shape: {}".format(y_true.shape, y_upper.shape)
results = {}
if isinstance(option, list):
option_list = option
elif option == "all":
option_list = ["rmse", "nll", "auucc_gain", "picp", "mpiw", "r2"]
else:
option_list = [option]
if "rmse" in option_list:
results["rmse"] = mean_squared_error(y_true, y_mean, squared=False)
if "nll" in option_list:
if nll_fn is None:
nll = negative_log_likelihood_Gaussian(y_true, y_mean, y_lower, y_upper)
results["nll"] = nll
else:
results["nll"] = np.mean(nll_fn(y_true))
if "auucc_gain" in option_list:
gain = auucc_gain(y_true, y_mean, y_lower, y_upper)
results["auucc_gain"] = gain
if "picp" in option_list:
results["picp"] = picp(y_true, y_lower, y_upper)
if "mpiw" in option_list:
results["mpiw"] = mpiw(y_lower, y_upper)
if "r2" in option_list:
results["r2"] = r2_score(y_true, y_mean)
return results
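# --- Illustrative sketch (hypothetical, synthetic data): score a synthetic
# mean prediction and constant-width interval with a subset of the metrics
# above (avoiding the ones that need additional dependencies).
def _example_regression_metrics():
    rng = np.random.RandomState(0)
    y_true = rng.randn(200)
    y_mean = y_true + 0.1 * rng.randn(200)
    y_lower = y_mean - 0.5
    y_upper = y_mean + 0.5
    return compute_regression_metrics(y_true, y_mean, y_lower, y_upper,
                                      option=["rmse", "picp", "mpiw", "r2"])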
def _check_not_tuple_of_2_elements(obj, obj_name='obj'):
"""Check object is not tuple or does not have 2 elements."""
if not isinstance(obj, tuple) or len(obj) != 2:
raise TypeError('%s must be a tuple of 2 elements.' % obj_name)
def plot_uncertainty_distribution(dist, show_quantile_dots=False, qd_sample=20, qd_bins=7,
ax=None, figsize=None, dpi=None,
title='Predicted Distribution', xlims=None, xlabel='Prediction', ylabel='Density', **kwargs):
"""
Plot the uncertainty distribution for a single distribution.
Args:
dist: scipy.stats._continuous_distns.
A scipy distribution object.
show_quantile_dots: boolean.
Whether to show quantile dots on top of the density plot.
qd_sample: int.
Number of dots for the quantile dot plot.
qd_bins: int.
Number of bins for the quantile dot plot.
ax: matplotlib.axes.Axes or None, optional (default=None).
Target axes instance. If None, new figure and axes will be created.
figsize: tuple of 2 elements or None, optional (default=None).
Figure size.
dpi : int or None, optional (default=None).
Resolution of the figure.
title : string or None, optional (default='Predicted Distribution')
Axes title.
If None, title is disabled.
xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.xlim()``.
xlabel : string or None, optional (default=Prediction)
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional (default=Density)
Y-axis title label.
If None, title is disabled.
Returns:
matplotlib.axes.Axes: ax : The plot with prediction distribution.
"""
import matplotlib.pyplot as plt
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
x = np.linspace(dist.ppf(0.01), dist.ppf(0.99), 100)
ax.plot(x, dist.pdf(x), **kwargs)
if show_quantile_dots:
from matplotlib.patches import Circle
from matplotlib.collections import PatchCollection
import matplotlib.ticker as ticker
data = dist.rvs(size=10000)
p_less_than_x = np.linspace(1 / qd_sample / 2, 1 - (1 / qd_sample / 2), qd_sample)
x_ = np.percentile(data, p_less_than_x * 100)  # Inverse CDF (ppf)
# Create bins
hist = np.histogram(x_, bins=qd_bins)
bins, edges = hist
radius = (edges[1] - edges[0]) / 2
ax2 = ax.twinx()
patches = []
max_y = 0
for i in range(qd_bins):
x_bin = (edges[i + 1] + edges[i]) / 2
y_bins = [(i + 1) * (radius * 2) for i in range(bins[i])]
max_y = max(y_bins) if max(y_bins) > max_y else max_y
for _, y_bin in enumerate(y_bins):
circle = Circle((x_bin, y_bin), radius)
patches.append(circle)
p = PatchCollection(patches, alpha=0.4)
ax2.add_collection(p)
# Axis tweak
y_scale = (max_y + radius) / max(dist.pdf(x))
ticks_y = ticker.FuncFormatter(lambda value, pos: '{0:g}'.format(value / y_scale))
ax2.yaxis.set_major_formatter(ticks_y)
ax2.set_yticklabels([])
if xlims is not None:
ax2.set_xlim(left=xlims[0], right=xlims[1])
else:
ax2.set_xlim([min(x_) - radius, max(x) + radius])
ax2.set_ylim([0, max_y + radius])
ax2.set_aspect(1)
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
return ax
def plot_picp_by_feature(x_test, y_test, y_test_pred_lower_total, y_test_pred_upper_total, num_bins=10,
ax=None, figsize=None, dpi=None, xlims=None, ylims=None, xscale="linear",
title=None, xlabel=None, ylabel=None):
"""
Plot how prediction uncertainty varies across the entire range of a feature.
Args:
x_test: One dimensional ndarray.
Feature column of the test dataset.
y_test: One dimensional ndarray.
Ground truth label of the test dataset.
y_test_pred_lower_total: One dimensional ndarray.
Lower bound of the total uncertainty range.
y_test_pred_upper_total: One dimensional ndarray.
Upper bound of the total uncertainty range.
num_bins: int.
Number of bins used to discretize x_test into equal-sample-sized bins.
ax: matplotlib.axes.Axes or None, optional (default=None). Target axes instance. If None, new figure and axes will be created.
figsize: tuple of 2 elements or None, optional (default=None). Figure size.
dpi : int or None, optional (default=None). Resolution of the figure.
xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.xlim()``.
ylims: tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.ylim()``.
xscale: Passed to ``ax.set_xscale()``.
title : string or None, optional
Axes title.
If None, title is disabled.
xlabel : string or None, optional
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional
Y-axis title label.
If None, title is disabled.
Returns:
matplotlib.axes.Axes: ax : The plot with PICP scores binned by a feature.
"""
from scipy.stats.mstats import mquantiles
import matplotlib.pyplot as plt
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
x_uniques_sorted = np.sort(np.unique(x_test))
num_unique = len(x_uniques_sorted)
sample_bin_ids = np.searchsorted(x_uniques_sorted, x_test)
if len(x_uniques_sorted) > 10: # bin the values
q_bins = mquantiles(x_test, np.histogram_bin_edges([], bins=num_bins-1, range=(0.0, 1.0))[1:])
q_sample_bin_ids = np.digitize(x_test, q_bins)
picps = np.array([picp(y_test[q_sample_bin_ids==bin], y_test_pred_lower_total[q_sample_bin_ids==bin],
y_test_pred_upper_total[q_sample_bin_ids==bin]) for bin in range(num_bins)])
unique_sample_bin_ids = np.digitize(x_uniques_sorted, q_bins)
picp_replicated = [len(x_uniques_sorted[unique_sample_bin_ids == bin]) * [picps[bin]] for bin in range(num_bins)]
picp_replicated = np.array([item for sublist in picp_replicated for item in sublist])
else:
picps = np.array([picp(y_test[sample_bin_ids == bin], y_test_pred_lower_total[sample_bin_ids == bin],
y_test_pred_upper_total[sample_bin_ids == bin]) for bin in range(num_unique)])
picp_replicated = picps
ax.plot(x_uniques_sorted, picp_replicated, label='PICP')
ax.axhline(0.95, linestyle='--', label='95%')
ax.set_ylabel('PICP')
ax.legend(loc='best')
if title is None:
title = 'Test data overall PICP: {:.2f} MPIW: {:.2f}'.format(
picp(y_test,
y_test_pred_lower_total,
y_test_pred_upper_total),
mpiw(y_test_pred_lower_total,
y_test_pred_upper_total))
if xlims is not None:
ax.set_xlim(left=xlims[0], right=xlims[1])
if ylims is not None:
ax.set_ylim(bottom=ylims[0], top=ylims[1])
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if xscale is not None:
ax.set_xscale(xscale)
return ax
def plot_uncertainty_by_feature(x_test, y_test_pred_mean, y_test_pred_lower_total, y_test_pred_upper_total,
y_test_pred_lower_epistemic=None, y_test_pred_upper_epistemic=None,
ax=None, figsize=None, dpi=None, xlims=None, xscale="linear",
title=None, xlabel=None, ylabel=None):
"""
Plot how prediction uncertainty varies across the entire range of a feature.
Args:
x_test: one dimensional ndarray.
Feature column of the test dataset.
y_test_pred_mean: One dimensional ndarray.
Model prediction for the test dataset.
y_test_pred_lower_total: One dimensional ndarray.
Lower bound of the total uncertainty range.
y_test_pred_upper_total: One dimensional ndarray.
Upper bound of the total uncertainty range.
y_test_pred_lower_epistemic: One dimensional ndarray.
Lower bound of the epistemic uncertainty range.
y_test_pred_upper_epistemic: One dimensional ndarray.
Upper bound of the epistemic uncertainty range.
ax: matplotlib.axes.Axes or None, optional (default=None). Target axes instance. If None, new figure and axes will be created.
figsize: tuple of 2 elements or None, optional (default=None). Figure size.
dpi : int or None, optional (default=None). Resolution of the figure.
xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.xlim()``.
xscale: Passed to ``ax.set_xscale()``.
title : string or None, optional
Axes title.
If None, title is disabled.
xlabel : string or None, optional
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional
Y-axis title label.
If None, title is disabled.
Returns:
matplotlib.axes.Axes: ax : The plot with model's uncertainty binned by a feature.
"""
import matplotlib.pyplot as plt
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
x_uniques_sorted = np.sort(np.unique(x_test))
y_pred_var = ((y_test_pred_upper_total - y_test_pred_lower_total) / 4.0)**2
agg_y_std = np.array([np.sqrt(np.mean(y_pred_var[x_test==x])) for x in x_uniques_sorted])
agg_y_mean = np.array([np.mean(y_test_pred_mean[x_test==x]) for x in x_uniques_sorted])
ax.plot(x_uniques_sorted, agg_y_mean, '-b', lw=2, label='mean prediction')
ax.fill_between(x_uniques_sorted,
agg_y_mean - 2.0 * agg_y_std,
agg_y_mean + 2.0 * agg_y_std,
alpha=0.3, label='total uncertainty')
if y_test_pred_lower_epistemic is not None:
y_pred_var_epistemic = ((y_test_pred_upper_epistemic - y_test_pred_lower_epistemic) / 4.0)**2
agg_y_std_epistemic = np.array([np.sqrt(np.mean(y_pred_var_epistemic[x_test==x])) for x in x_uniques_sorted])
ax.fill_between(x_uniques_sorted,
agg_y_mean - 2.0 * agg_y_std_epistemic,
agg_y_mean + 2.0 * agg_y_std_epistemic,
alpha=0.3, label='model uncertainty')
ax.legend(loc='best')
if xlims is not None:
ax.set_xlim(left=xlims[0], right=xlims[1])
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if xscale is not None:
ax.set_xscale(xscale)
return ax
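# --- Illustrative sketch (hypothetical, synthetic data): plot how a
# feature-dependent uncertainty band widens with the feature value.
def _example_plot_uncertainty_by_feature():
    rng = np.random.RandomState(0)
    x = np.repeat(np.arange(10.0), 20)
    y_mean = 2.0 * x + rng.randn(len(x))
    y_lower = y_mean - (1.0 + 0.2 * x)
    y_upper = y_mean + (1.0 + 0.2 * x)
    return plot_uncertainty_by_feature(x, y_mean, y_lower, y_upper,
                                       xlabel="feature", ylabel="prediction")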
<s> import numpy as np
import pandas as pd
from scipy.stats import entropy
from sklearn.metrics import roc_auc_score, log_loss, accuracy_score
def entropy_based_uncertainty_decomposition(y_prob_samples):
""" Entropy based decomposition [2]_ of predictive uncertainty into aleatoric and epistemic components.
References:
.. [2] Depeweg, S., Hernandez-Lobato, J. M., Doshi-Velez, F., & Udluft, S. (2018, July). Decomposition of
uncertainty in Bayesian deep learning for efficient and risk-sensitive learning. In International Conference
on Machine Learning (pp. 1184-1193). PMLR.
Args:
y_prob_samples: list of array-like of shape (n_samples, n_classes) containing class prediction probabilities
corresponding to samples from the model posterior.
Returns:
tuple:
- total_uncertainty: entropy of the predictive distribution.
- aleatoric_uncertainty: aleatoric component of the total_uncertainty.
- epistemic_uncertainty: epistemic component of the total_uncertainty.
"""
y_preds_samples_stacked = np.stack(y_prob_samples)
preds_mean = np.mean(y_preds_samples_stacked, 0)
total_uncertainty = entropy(preds_mean, axis=1)
aleatoric_uncertainty = np.mean(
np.concatenate([entropy(y_pred, axis=1).reshape(-1, 1) for y_pred in y_prob_samples], axis=1),
axis=1)
epistemic_uncertainty = total_uncertainty - aleatoric_uncertainty
return total_uncertainty, aleatoric_uncertainty, epistemic_uncertainty
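# --- Illustrative sketch (hypothetical, synthetic data): decompose uncertainty
# from a few synthetic posterior prediction samples.
def _example_entropy_decomposition():
    rng = np.random.RandomState(0)
    # five posterior samples of class probabilities for 50 points and 3 classes
    y_prob_samples = [rng.dirichlet(np.ones(3), size=50) for _ in range(5)]
    total, aleatoric, epistemic = entropy_based_uncertainty_decomposition(y_prob_samples)
    return total.mean(), aleatoric.mean(), epistemic.mean()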
def multiclass_brier_score(y_true, y_prob):
"""Brier score for multi-class.
Args:
y_true: array-like of shape (n_samples,)
ground truth labels.
y_prob: array-like of shape (n_samples, n_classes).
Probability scores from the base model.
Returns:
float: Brier score.
"""
assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)"
y_target = np.zeros_like(y_prob)
y_target[np.arange(len(y_true)), y_true] = 1.0
return np.mean(np.sum((y_target - y_prob) ** 2, axis=1))
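# --- Illustrative sketch (hypothetical, synthetic data): Brier score of
# synthetic class-probability predictions against integer labels.
def _example_brier_score():
    rng = np.random.RandomState(0)
    y_prob = rng.dirichlet(np.ones(4), size=30)
    y_true = rng.randint(0, 4, size=30)
    return multiclass_brier_score(y_true, y_prob)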
def area_under_risk_rejection_rate_curve(y_true, y_prob, y_pred=None, selection_scores=None, risk_func=accuracy_score,
attributes=None, num_bins=10, subgroup_ids=None,
return_counts=False):
""" Computes risk vs rejection rate curve and the area under this curve. Similar to risk-coverage curves [3]_ where
coverage instead of rejection rate is used.
References:
.. [3] Franc, Vojtech, and Daniel Prusa. "On discriminative learning of prediction uncertainty."
In International Conference on Machine Learning, pp. 1963-1971. 2019.
Args:
y_true: array-like of shape (n_samples,)
ground truth labels.
y_prob: array-like of shape (n_samples, n_classes).
Probability scores from the base model.
y_pred: array-like of shape (n_samples,)
predicted labels.
selection_scores: scores corresponding to certainty in the predicted labels.
risk_func: risk function under consideration.
attributes: (optional) if risk function is a fairness metric also pass the protected attribute name.
num_bins: number of bins.
subgroup_ids: (optional) selectively compute risk on a subgroup of the samples specified by subgroup_ids.
return_counts: set to True to return counts also.
Returns:
float or tuple:
- aurrrc (float): area under risk rejection rate curve.
- rejection_rates (list): rejection rates for each bin (returned only if return_counts is True).
- selection_thresholds (list): selection threshold for each bin (returned only if return_counts is True).
- risks (list): risk in each bin (returned only if return_counts is True).
"""
if selection_scores is None:
assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)"
selection_scores = y_prob[np.arange(y_prob.shape[0]), np.argmax(y_prob, axis=1)]
if y_pred is None:
assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)"
y_pred = np.argmax(y_prob, axis=1)
order = np.argsort(selection_scores)[::-1]
rejection_rates = []
selection_thresholds = []
risks = []
for bin_id in range(num_bins):
samples_in_bin = len(y_true) // num_bins
selection_threshold = selection_scores[order[samples_in_bin * (bin_id+1)-1]]
selection_thresholds.append(selection_threshold)
ids = selection_scores >= selection_threshold
if sum(ids) > 0:
if attributes is None:
if isinstance(y_true, pd.Series):
y_true_numpy = y_true.values
else:
y_true_numpy = y_true
if subgroup_ids is None:
risk_value = 1.0 - risk_func(y_true_numpy[ids], y_pred[ids])
else:
if sum(subgroup_ids & ids) > 0:
risk_value = 1.0 - risk_func(y_true_numpy[subgroup_ids & ids], y_pred[subgroup_ids & ids])
else:
risk_value = 0.0
else:
risk_value = risk_func(y_true.iloc[ids], y_pred[ids], prot_attr=attributes)
else:
risk_value = 0.0
risks.append(risk_value)
rejection_rates.append(1.0 - 1.0 * sum(ids) / len(y_true))
aurrrc = np.nanmean(risks)
if not return_counts:
return aurrrc
else:
return aurrrc, rejection_rates, selection_thresholds, risks
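# --- Illustrative sketch (hypothetical, synthetic data): area under the risk
# vs. rejection rate curve with the default accuracy-based risk function.
def _example_aurrrc():
    rng = np.random.RandomState(0)
    y_prob = rng.dirichlet(np.ones(3), size=120)
    y_true = rng.randint(0, 3, size=120)
    return area_under_risk_rejection_rate_curve(y_true, y_prob, num_bins=10)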
def expected_calibration_error(y_true, y_prob, y_pred=None, num_bins=10, return_counts=False):
""" Computes the reliability curve and the expected calibration error [1]_ .
References:
.. [1] Chuan Guo, Geoff Pleiss, Yu Sun, Kilian Q. Weinberger; Proceedings of the 34th International Conference
on Machine Learning, PMLR 70:1321-1330, 2017.
Args:
y_true: array-like of shape (n_samples,)
ground truth labels.
y_prob: array-like of shape (n_samples, n_classes).
Probability scores from the base model.
y_pred: array-like of shape (n_samples,)
predicted labels.
num_bins: number of bins.
return_counts: set to True to return counts also.
Returns:
float or tuple:
- ece (float): expected calibration error.
- confidences_in_bins: average confidence in each bin (returned only if return_counts is True).
- accuracies_in_bins: accuracy in each bin (returned only if return_counts is True).
- frac_samples_in_bins: fraction of samples in each bin (returned only if return_counts is True).
"""
assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)"
num_samples, num_classes = y_prob.shape
top_scores = np.max(y_prob, axis=1)
if y_pred is None:
y_pred = np.argmax(y_prob, axis=1)
if num_classes == 2:
bins_edges = np.histogram_bin_edges([], bins=num_bins, range=(0.5, 1.0))
else:
bins_edges = np.histogram_bin_edges([], bins=num_bins, range=(0.0, 1.0))
non_boundary_bin_edges = bins_edges[1:-1]
bin_centers = (bins_edges[1:] + bins_edges[:-1])/2
sample_bin_ids = np.digitize(top_scores, non_boundary_bin_edges)
num_samples_in_bins = np.zeros(num_bins)
accuracies_in_bins = np.zeros(num_bins)
confidences_in_bins = np.zeros(num_bins)
for bin in range(num_bins):
num_samples_in_bins[bin] = len(y_pred[sample_bin_ids == bin])
if num_samples_in_bins[bin] > 0:
accuracies_in_bins[bin] = np.sum(y_true[sample_bin_ids == bin] == y_pred[sample_bin_ids == bin]) / num_samples_in_bins[bin]
confidences_in_bins[bin] = np.sum(top_scores[sample_bin_ids == bin]) / num_samples_in_bins[bin]
ece = np.sum(
num_samples_in_bins * np.abs(accuracies_in_bins - confidences_in_bins) / num_samples
)
frac_samples_in_bins = num_samples_in_bins / num_samples
if not return_counts:
return ece
else:
return ece, confidences_in_bins, accuracies_in_bins, frac_samples_in_bins, bin_centers
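# --- Illustrative sketch (hypothetical, synthetic data): expected calibration
# error of synthetic three-class probability scores, with the per-bin counts.
def _example_ece():
    rng = np.random.RandomState(0)
    y_prob = rng.dirichlet(np.ones(3), size=200)
    y_true = rng.randint(0, 3, size=200)
    ece, conf, acc, frac, centers = expected_calibration_error(
        y_true, y_prob, num_bins=10, return_counts=True)
    return ece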
def compute_classification_metrics(y_true, y_prob, option='all'):
"""
Computes the metrics specified in the option which can be string or a list of strings. Default option `all` computes
the [aurrrc, ece, auroc, nll, brier, accuracy] metrics.
Args:
y_true: array-like of shape (n_samples,)
ground truth labels.
y_prob: array-like of shape (n_samples, n_classes).
Probability scores from the base model.
option: string or list of strings containing the names of the metrics to be computed.
Returns:
dict: a dictionary containing the computed metrics.
"""
results = {}
if isinstance(option, list):
option_list = option
elif option == "all":
option_list = ["aurrrc", "ece", "auroc", "nll", "brier", "accuracy"]
else:
option_list = [option]
if "aurrrc" in option_list:
results["aurrrc"] = area_under_risk_rejection_rate_curve(y_true=y_true, y_prob=y_prob)
if "ece" in option_list:
results["ece"] = expected_calibration_error(y_true=y_true, y_prob=y_prob)
if "auroc" in option_list:
results["auroc"], _ = roc_auc_score(y_true=y_true, y_score=y_prob)
if "nll" in option_list:
results["nll"] = log_loss(y_true=y_true, y_pred=np.argmax(y_prob, axis=1))
if "brier" in option_list:
results["brier"] = multiclass_brier_score(y_true=y_true, y_prob=y_prob)
if "accuracy" in option_list:
results["accuracy"] = accuracy_score(y_true=y_true, y_pred=np.argmax(y_prob, axis=1))
return results
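# --- Illustrative sketch (hypothetical, synthetic data): compute a chosen
# subset of the classification metrics on synthetic probability scores.
def _example_classification_metrics():
    rng = np.random.RandomState(0)
    y_prob = rng.dirichlet(np.ones(3), size=150)
    y_true = rng.randint(0, 3, size=150)
    return compute_classification_metrics(y_true, y_prob,
                                          option=["ece", "nll", "brier", "accuracy"])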
def plot_reliability_diagram(y_true, y_prob, y_pred, plot_label=[""], num_bins=10):
"""
Plots the reliability diagram showing the calibration error for different confidence scores. Multiple curves
can be plotted by passing data as lists.
Args:
y_true: array-like or a list of array-like of shape (n_samples,)
ground truth labels.
y_prob: array-like or a list of array-like of shape (n_samples, n_classes).
Probability scores from the base model.
y_pred: array-like or a list of array-like of shape (n_samples,)
predicted labels.
plot_label: (optional) list of names identifying each curve.
num_bins: number of bins.
Returns:
tuple:
- ece_list: ece: list containing expected calibration error for each curve.
- accuracies_in_bins_list: list containing binned average accuracies for each curve.
- frac_samples_in_bins_list: list containing binned sample frequencies for each curve.
- confidences_in_bins_list: list containing binned average confidence for each curve.
"""
import matplotlib.pyplot as plt
if not isinstance(y_true, list):
y_true, y_prob, y_pred = [y_true], [y_prob], [y_pred]
if len(plot_label) != len(y_true):
raise ValueError('y_true and plot_label should be of same length.')
ece_list = []
accuracies_in_bins_list = []
frac_samples_in_bins_list = []
confidences_in_bins_list = []
for idx in range(len(plot_label)):
ece, confidences_in_bins, accuracies_in_bins, frac_samples_in_bins, bins = expected_calibration_error(y_true[idx],
y_prob[idx],
y_pred[idx],
num_bins=num_bins,
return_counts=True)
ece_list.append(ece)
accuracies_in_bins_list.append(accuracies_in_bins)
frac_samples_in_bins_list.append(frac_samples_in_bins)
confidences_in_bins_list.append(confidences_in_bins)
fig = plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
for idx in range(len(plot_label)):
plt.plot(bins, frac_samples_in_bins_list[idx], 'o-', label=plot_label[idx])
plt.title("Confidence Histogram")
plt.xlabel("Confidence")
plt.ylabel("Fraction of Samples")
plt.grid()
plt.ylim([0.0, 1.0])
plt.legend()
plt.subplot(1, 2, 2)
for idx in range(len(plot_label)):
plt.plot(bins, accuracies_in_bins_list[idx], 'o-',
label="{} ECE = {:.2f}".format(plot_label[idx], ece_list[idx]))
plt.plot(np.linspace(0, 1, 50), np.linspace(0, 1, 50), 'b.', label="Perfect Calibration")
plt.title("Reliability Plot")
plt.xlabel("Confidence")
plt.ylabel("Accuracy")
plt.grid()
plt.legend()
plt.show()
return ece_list, accuracies_in_bins_list, frac_samples_in_bins_list, confidences_in_bins_list
def plot_risk_vs_rejection_rate(y_true, y_prob, y_pred, selection_scores=None, plot_label=[""], risk_func=None,
attributes=None, num_bins=10, subgroup_ids=None):
"""
Plots the risk vs rejection rate curve showing the risk for different rejection rates. Multiple curves
can be plotted by passing data as lists.
Args:
y_true: array-like or a list of array-like of shape (n_samples,)
ground truth labels.
y_prob: array-like or a list of array-like of shape (n_samples, n_classes).
Probability scores from the base model.
y_pred: array-like or a list of array-like of shape (n_samples,)
predicted labels.
selection_scores: ndarray or a list of ndarray containing scores corresponding to certainty in the predicted labels.
risk_func: risk function under consideration.
attributes: (optional) if risk function is a fairness metric also pass the protected attribute name.
num_bins: number of bins.
subgroup_ids: (optional) ndarray or a list of ndarray containing subgroup_ids to selectively compute risk on a
subgroup of the samples specified by subgroup_ids.
Returns:
tuple:
- aurrrc_list: list containing the area under risk rejection rate curves.
- rejection_rate_list: list containing the binned rejection rates.
- selection_thresholds_list: list containing the binned selection thresholds.
- risk_list: list containing the binned risks.
"""
import matplotlib.pyplot as plt
if not isinstance(y_true, list):
y_true, y_prob, y_pred, selection_scores, subgroup_ids = [y_true], [y_prob], [y_pred], [selection_scores], [subgroup_ids]
if len(plot_label) != len(y_true):
raise ValueError('y_true and plot_label should be of same length.')
aurrrc_list = []
rejection_rate_list = []
risk_list = []
selection_thresholds_list = []
for idx in range(len(plot_label)):
aursrc, rejection_rates, selection_thresholds, risks = area_under_risk_rejection_rate_curve(
y_true[idx],
y_prob[idx],
y_pred[idx],
selection_scores=selection_scores[idx],
risk_func=risk_func,
attributes=attributes,
num_bins=num_bins,
subgroup_ids=subgroup_ids[idx],
return_counts=True
)
aurrrc_list.append(aursrc)
rejection_rate_list.append(rejection_rates)
risk_list.append(risks)
selection_thresholds_list.append(selection_thresholds)
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
for idx in range(len(plot_label)):
plt.plot(rejection_rate_list[idx], risk_list[idx], label="{} AURRRC={:.5f}".format(plot_label[idx], aurrrc_list[idx]))
plt.legend(loc="best")
plt.xlabel("Rejection Rate")
if risk_func is None:
ylabel = "Prediction Error Rate"
else:
if 'accuracy' in risk_func.__name__:
ylabel = "1.0 - " + risk_func.__name__
else:
ylabel = risk_func.__name__
plt.ylabel(ylabel)
plt.title("Risk vs Rejection Rate Plot")
plt.grid()
plt.subplot(1, 2, 2)
for idx in range(len(plot_label)):
plt.plot(selection_thresholds_list[idx], risk_list[idx], label="{}".format(plot_label[idx]))
plt.legend(loc="best")
plt.xlabel("Selection Threshold")
if risk_func is None:
ylabel = "Prediction Error Rate"
else:
if 'accuracy' in risk_func.__name__:
ylabel = "1.0 - " + risk_func.__name__
else:
ylabel = risk_func.__name__
plt.ylabel(ylabel)
plt.title("Risk vs Selection Threshold Plot")
plt.grid()
plt.show()
return aurrrc_list, rejection_rate_list, selection_thresholds_list, risk_list
<s> from .classification_metrics import expected_calibration_error, area_under_risk_rejection_rate_curve, \
compute_classification_metrics, entropy_based_uncertainty_decomposition
from .regression_metrics import picp, mpiw, compute_regression_metrics, plot_uncertainty_distribution, \
plot_uncertainty_by_feature, plot_picp_by_feature
from .uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve
<s> from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import simps, trapz
from sklearn.isotonic import IsotonicRegression
DEFAULT_X_AXIS_NAME = 'excess'
DEFAULT_Y_AXIS_NAME = 'missrate'
class UncertaintyCharacteristicsCurve:
"""
Class with main functions of the Uncertainty Characteristics Curve (UCC).
"""
def __init__(self, normalize=True, precompute_bias_data=True):
"""
:param normalize: set initial axes normalization flag (can be changed via set_coordinates())
:param precompute_bias_data: if True, fit() will compute statistics necessary to generate bias-based
UCCs (in addition to the scale-based ones). Skipping this precomputation may speed up the fit() call
if bias-based UCC is not needed.
"""
self.axes_name2idx = {"missrate": 1, "bandwidth": 2, "excess": 3, "deficit": 4}
self.axes_idx2descr = {1: "Missrate", 2: "Bandwidth", 3: "Excess", 4: "Deficit"}
self.x_axis_idx = None
self.y_axis_idx = None
self.norm_x_axis = False
self.norm_y_axis = False
self.std_unit = None
self.normalize = normalize
self.d = None
self.gt = None
self.lb = None
self.ub = None
self.precompute_bias_data = precompute_bias_data
self.set_coordinates(x_axis_name=DEFAULT_X_AXIS_NAME, y_axis_name=DEFAULT_Y_AXIS_NAME, normalize=normalize)
def set_coordinates(self, x_axis_name=None, y_axis_name=None, normalize=None):
"""
Assigns user-specified type to the axes and normalization behavior (sticky).
:param x_axis_name: None-> unchanged, or name from self.axes_name2idx
:param y_axis_name: ditto
:param normalize: True/False will activate/deactivate norming for the specified axes. Behavior for
axes whose name is None will not be changed.
Value None will leave norm status unchanged.
Note, axis=='missrate' will never get normalized, even with normalize == True
:return: none
"""
normalize = self.normalize if normalize is None else normalize
if x_axis_name is None and self.x_axis_idx is None:
raise ValueError("ERROR(UCC): x-axis has not been defined.")
if y_axis_name is None and self.y_axis_idx is None:
raise ValueError("ERROR(UCC): y-axis has not been defined.")
if x_axis_name is None and y_axis_name is None and normalize is not None:
# just set normalization on/off for both (already configured) axes and return
self.norm_x_axis = False if self.x_axis_idx == self.axes_name2idx['missrate'] else normalize
self.norm_y_axis = False if self.y_axis_idx == self.axes_name2idx['missrate'] else normalize
return
if x_axis_name is not None:
self.x_axis_idx = self.axes_name2idx[x_axis_name]
self.norm_x_axis = False if x_axis_name == 'missrate' else normalize
if y_axis_name is not None:
self.y_axis_idx = self.axes_name2idx[y_axis_name]
self.norm_y_axis = False if y_axis_name == 'missrate' else normalize
def set_std_unit(self, std_unit=None):
"""
Sets the UCC's unit to be used when displaying normalized axes.
:param std_unit: if None, the unit will be calculated as stddev of the ground truth data
(ValueError raised if data has not been set at this point)
or set to the user-specified value.
:return:
"""
if std_unit is None: # set it to stddev of data
if self.gt is None:
raise ValueError("ERROR(UCC): No data specified - cannot set stddev unit.")
self.std_unit = np.std(self.gt)
if np.isclose(self.std_unit, 0.):
print("WARN(UCC): data-based stddev is zero - resetting axes unit to 1.")
self.std_unit = 1.
else:
self.std_unit = float(std_unit)
def fit(self, X, gt):
"""
Calculates internal arrays necessary for other methods (plotting, auc, cost minimization).
Re-entrant.
:param X: [numsamples, 3] numpy matrix, or list of numpy matrices.
Col 1: predicted values
Col 2: lower band (deviate) wrt predicted value (always positive)
Col 3: upper band wrt predicted value (always positive)
If list is provided, all methods will output corresponding metrics as lists as well!
:param gt: Ground truth array (i.e., the 'actual' values corresponding to predictions in X)
:return: self
"""
if not isinstance(X, list):
X = [X]
newX = []
for x in X:
assert (isinstance(x, np.ndarray) and len(x.shape) == 2 and x.shape[1] == 3 and x.shape[0] == len(gt))
newX.append(self._sanitize_input(x))
self.d = [gt - x[:, 0] for x in newX]
self.lb = [x[:, 1] for x in newX]
self.ub = [x[:, 2] for x in newX]
self.gt = gt
self.set_std_unit()
self.plotdata_for_scale = []
self.plotdata_for_bias = []
# precompute plotdata:
for i in range(len(self.d)):
self.plotdata_for_scale.append(self._calc_plotdata(self.d[i], self.lb[i], self.ub[i], vary_bias=False))
if self.precompute_bias_data:
self.plotdata_for_bias.append(self._calc_plotdata(self.d[i], self.lb[i], self.ub[i], vary_bias=True))
return self
def minimize_cost(self, x_axis_cost=.5, y_axis_cost=.5, augment_cost_by_normfactor=True,
search=('scale', 'bias')):
"""
Find minima of a linear cost function for each component.
Cost function C = x_axis_cost * x_axis_value + y_axis_cost * y_axis_value.
A minimum can occur in the scale-based or bias-based UCC (this can be constrained by the 'search' arg).
The function returns a 'recipe' how to achieve the corresponding minimum, for each component.
:param x_axis_cost: weight of one unit on x_axis
:param y_axis_cost: weight of one unit on y_axis
:param augment_cost_by_normfactor: when False, the cost multipliers will apply as is. If True, they will be
pre-normed by the corresponding axis norm (where applicable), to account for range differences between axes.
:param search: list of types over which minimization is to be performed, valid elements are 'scale' and 'bias'.
:return: list of dicts - one per component, or a single dict, if there is only one component. Dict keys are -
'operation': can be 'bias' (additive) or 'scale' (multiplicative), 'modvalue': value to multiply by or to
add to error bars to achieve the minimum, 'new_x'/'new_y': new coordinates (operating point) with that
minimum, 'cost': new cost at minimum point, 'original_cost': original cost (original operating point).
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
if augment_cost_by_normfactor:
if self.norm_x_axis:
x_axis_cost /= self.std_unit
if self.norm_y_axis:
y_axis_cost /= self.std_unit
print("INFO(UCC): Pre-norming costs by corresp. std deviation: new x_axis_cost = %.4f, y_axis_cost = %.4f" %
(x_axis_cost, y_axis_cost))
if isinstance(search, tuple):
search = list(search)
if not isinstance(search, list):
search = [search]
min_costs = []
for d in range(len(self.d)):
# original OP cost
m, b, e, df = self._calc_missrate_bandwidth_excess_deficit(self.d[d], self.lb[d], self.ub[d])
original_cost = x_axis_cost * [0., m, b, e, df][self.x_axis_idx] + y_axis_cost * [0., m, b, e, df][
self.y_axis_idx]
plotdata = self.plotdata_for_scale[d]
cost_scale, minidx_scale = self._find_min_cost_in_component(plotdata, self.x_axis_idx, self.y_axis_idx,
x_axis_cost, y_axis_cost)
mcf_scale_multiplier = plotdata[minidx_scale][0]
mcf_scale_x = plotdata[minidx_scale][self.x_axis_idx]
mcf_scale_y = plotdata[minidx_scale][self.y_axis_idx]
if 'bias' in search:
if not self.precompute_bias_data:
raise ValueError(
"ERROR(UCC): Cannot perform minimization - instantiated without bias data computation")
plotdata = self.plotdata_for_bias[d]
cost_bias, minidx_bias = self._find_min_cost_in_component(plotdata, self.x_axis_idx, self.y_axis_idx,
x_axis_cost, y_axis_cost)
mcf_bias_add = plotdata[minidx_bias][0]
mcf_bias_x = plotdata[minidx_bias][self.x_axis_idx]
mcf_bias_y = plotdata[minidx_bias][self.y_axis_idx]
if 'bias' in search and 'scale' in search:
if cost_bias < cost_scale:
min_costs.append({'operation': 'bias', 'cost': cost_bias, 'modvalue': mcf_bias_add,
'new_x': mcf_bias_x, 'new_y': mcf_bias_y, 'original_cost': original_cost})
else:
min_costs.append({'operation': 'scale', 'cost': cost_scale, 'modvalue': mcf_scale_multiplier,
'new_x': mcf_scale_x, 'new_y': mcf_scale_y, 'original_cost': original_cost})
elif 'scale' in search:
min_costs.append({'operation': 'scale', 'cost': cost_scale, 'modvalue': mcf_scale_multiplier,
'new_x': mcf_scale_x, 'new_y': mcf_scale_y, 'original_cost': original_cost})
elif 'bias' in search:
min_costs.append({'operation': 'bias', 'cost': cost_bias, 'modvalue': mcf_bias_add,
'new_x': mcf_bias_x, 'new_y': mcf_bias_y, 'original_cost': original_cost})
else:
raise ValueError("(ERROR): Unknown search element (%s) requested." % ",".join(search))
if len(min_costs) < 2:
return min_costs[0]
else:
return min_costs
def get_specific_operating_point(self, req_x_axis_value=None, req_y_axis_value=None,
req_critical_value=None, vary_bias=False):
"""
Finds corresponding operating point on the current UCC, given a point on either x or y axis. Returns
a list of recipes how to achieve the point (x,y), for each component. If there is only one component,
returns a single recipe dict.
:param req_x_axis_value: requested x value on UCC (normalization status is taken from current display)
:param req_y_axis_value: requested y value on UCC (normalization status is taken from current display)
:param vary_bias: set to True when referring to bias-induced UCC (scale UCC default)
:return: list of dicts (recipes), or a single dict
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
if np.sum([req_x_axis_value is not None, req_y_axis_value is not None, req_critical_value is not None]) != 1:
raise ValueError("ERROR(UCC): exactly one axis value must be requested at a time.")
if vary_bias and not self.precompute_bias_data:
raise ValueError("ERROR(UCC): Cannot vary bias - instantiated without bias data computation")
xnorm = self.std_unit if self.norm_x_axis else 1.
ynorm = self.std_unit if self.norm_y_axis else 1.
recipe = []
for dc in range(len(self.d)):
plotdata = self.plotdata_for_bias[dc] if vary_bias else self.plotdata_for_scale[dc]
if req_x_axis_value is not None:
tgtidx = self.x_axis_idx
req_value = req_x_axis_value * xnorm
elif req_y_axis_value is not None:
tgtidx = self.y_axis_idx
req_value = req_y_axis_value * ynorm
elif req_critical_value is not None:
req_value = req_critical_value
tgtidx = 0  # first element in plotdata is always the critical value (scale or bias)
else:
raise RuntimeError("Unhandled case")
closestidx = np.argmin(np.asarray([np.abs(p[tgtidx] - req_value) for p in plotdata]))
recipe.append({'operation': ('bias' if vary_bias else 'scale'),
'modvalue': plotdata[closestidx][0],
'new_x': plotdata[closestidx][self.x_axis_idx] / xnorm,
'new_y': plotdata[closestidx][self.y_axis_idx] / ynorm})
if len(recipe) < 2:
return recipe[0]
else:
return recipe
def _find_min_cost_in_component(self, plotdata, idx1, idx2, cost1, cost2):
"""
Finds the minimum cost function value and the corresponding position index in plotdata
:param plotdata: list of tuples
:param idx1: idx of x-axis item within the tuple
:param idx2: idx of y-axis item within the tuple
:param cost1: cost factor for x-axis unit
:param cost2: cost factor for y-axis unit
:return: min cost value, index within plotdata where minimum occurs
"""
raw = [cost1 * i[idx1] + cost2 * i[idx2] for i in plotdata]
minidx = np.argmin(raw)
return raw[minidx], minidx
def _sanitize_input(self, x):
"""
Replaces problematic values in input data (e.g., zero error bars)
:param x: single matrix of input data [n, 3]
:return: sanitized version of x
"""
if np.isclose(np.sum(x[:, 1]), 0.):
raise ValueError("ERROR(UCC): Provided lower bands are all zero.")
if np.isclose(np.sum(x[:, 2]), 0.):
raise ValueError("ERROR(UCC): Provided upper bands are all zero.")
for i in [1, 2]:
if any(np.isclose(x[:, i], 0.)):
print("WARN(UCC): some band values are 0. - REPLACING with positive minimum")
m = np.min(x[x[:, i] > 0, i])
x = np.where(np.isclose(x, 0.), m, x)
return x
def _calc_avg_excess(self, d, lb, ub):
"""
Excess is amount an error bar overshoots actual
:param d: pred-actual array
:param lb: lower band
:param ub: upper band
:return: average excess over array
|
"""
excess = np.zeros(d.shape)
posidx = np.where(d >= 0)[0]
excess[posidx] = np.where(ub[posidx] - d[posidx] < 0., 0., ub[posidx] - d[posidx])
negidx = np.where(d < 0)[0]
excess[negidx] = np.where(lb[negidx] + d[negidx] < 0., 0., lb[negidx] + d[negidx])
return np.mean(excess)
def _calc_avg_deficit(self, d, lb, ub):
"""
Deficit is error bar insufficiency: bar falls short of actual
:param d: pred-actual array
:param lb: lower band
:param ub: upper band
:return: average deficit over array
"""
deficit = np.zeros(d.shape)
posidx = np.where(d >= 0)[0]
deficit[posidx] = np.where(- ub[posidx] + d[posidx] < 0., 0., - ub[posidx] + d[posidx])
negidx = np.where(d < 0)[0]
deficit[negidx] = np.where(- lb[negidx] - d[negidx] < 0., 0., - lb[negidx] - d[negidx])
return np.mean(deficit)
def _calc_missrate_bandwidth_excess_deficit(self, d, lb, ub, scale=1.0, bias=0.0):
"""
Calculates recall at a given scale/bias, average bandwidth and average excess
:param d: delta
:param lb: lower band
:param ub: upper band
:param scale: scale * (x + bias)
:param bias:
:return: miss rate, average bandwidth, avg excess, avg deficit
"""
abslband = scale * np.where((lb + bias) < 0., 0., lb + bias)
absuband = scale * np.where((ub + bias) < 0., 0., ub + bias)
recall = np.sum((d >= - abslband) & (d <= absuband)) / len(d)
avgbandwidth = np.mean([absuband, abslband])
avgexcess = self._calc_avg_excess(d, abslband, absuband)
avgdeficit = self._calc_avg_deficit(d, abslband, absuband)
return 1 - recall, avgbandwidth, avgexcess, avgdeficit
def _calc_plotdata(self, d, lb, ub, vary_bias=False):
"""
Generates data necessary for various UCC metrics.
:param d: delta (predicted - actual) vector
:param ub: upper uncertainty bandwidth (above predicted)
:param lb: lower uncertainty bandwidth (below predicted) - all positive (bandwidth)
:param vary_bias: True will switch to additive bias instead of scale
:return: list. Elements are tuples (varyvalue, missrate, bandwidth, excess, deficit)
"""
# step 1: collect critical scale or bias values
critval = []
for i in range(len(d)):
if not vary_bias:
if d[i] >= 0:
critval.append(d[i] / ub[i])
else:
critval.append(-d[i] / lb[i])
else:
if d[i] >= 0:
critval.append(d[i] - ub[i])
else:
critval.append(-lb[i] - d[i])
critval = sorted(critval)
plotdata = []
for i in range(len(critval)):
if not vary_bias:
missrate, bandwidth, excess, deficit = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub,
scale=critval[i])
else:
missrate, bandwidth, excess, deficit = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub,
bias=critval[i])
plotdata.append((critval[i], missrate, bandwidth, excess, deficit))
return plotdata
def get_AUUCC(self, vary_bias=False, aucfct="trapz", partial_x=None, partial_y=None):
"""
returns approximate area under the curve on current coordinates, for each component.
:param vary_bias: False == varies scale, True == varies bias
:param aucfct: specifies AUC integrator (can be "trapz", "simps")
:param partial_x: tuple (x_min, x_max) defining the interval on x over which to calculate a partial AUC.
The interval bounds refer to axes as visualized (ie. potentially normed)
:param partial_y: tuple (y_min, y_max) defining the interval on y over which to calculate a partial AUC. partial_x must be None.
:return: list of floats with AUUCCs for each input component, or a single float, if there is only 1 component.
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
if vary_bias and not self.precompute_bias_data:
raise ValueError("ERROR(UCC): Cannot vary bias - instantiated without bias data computation")
if partial_x is not None and partial_y is not None:
raise ValueError("ERROR(UCC): partial_x and partial_y can not be specified at the same time.")
assert(partial_x is None or (isinstance(partial_x, tuple) and len(partial_x)==2))
assert(partial_y is None or (isinstance(partial_y, tuple) and len(partial_y)==2))
# find starting point (where the x axis value starts to actually change)
rv = []
# do this for individual streams
xind = self.x_axis_idx
aucfct = simps if aucfct == "simps" else trapz
for s in range(len(self.d)):
plotdata = self.plotdata_for_bias[s] if vary_bias else self.plotdata_for_scale[s]
prev = plotdata[0][xind]
t = 1
cval = plotdata[t][xind]
while cval == prev and t < len(plotdata) - 1:
t += 1
prev = cval
cval = plotdata[t][xind]
startt = t - 1 # from here, it's a valid function
endtt = len(plotdata)
if startt >= endtt - 2:
rvs = 0. # no area
else:
xnorm = self.std_unit if self.norm_x_axis else 1.
ynorm = self.std_unit if self.norm_y_axis else 1.
y=[(plotdata[i][self.y_axis_idx]) / ynorm for i in range(startt, endtt)]
x=[(plotdata[i][self.x_axis_idx]) / xnorm for i in range(startt, endtt)]
if partial_x is not None:
from_i = self._find_closest_index(partial_x[0], x)
to_i = self._find_closest_index(partial_x[1], x) + 1
elif partial_y is not None:
from_i = self._find_closest_index(partial_y[0], y)
to_i = self._find_closest_index(partial_y[1], y)
if from_i > to_i: # y is in reverse order
from_i, to_i = to_i, from_i
to_i += 1 # as upper bound in array indexing
else:
from_i = 0
to_i = len(x)
to_i = min(to_i, len(x))
if to_i < from_i:
raise ValueError("ERROR(UCC): Failed to find an appropriate partial-AUC interval in the data.")
if to_i - from_i < 2:
raise RuntimeError("ERROR(UCC): There are too few samples (1) in the partial-AUC interval specified")
rvs = aucfct(x=x[from_i:to_i], y=y[from_i:to_i])
rv.append(rvs)
if len(rv) < 2:
return rv[0]
else:
return rv
@staticmethod
def _find_closest_index(value, array):
"""
Returns an index of the 'array' element closest in value to 'value'
:param value:
:param array:
:return:
"""
return np.argmin(np.abs(np.asarray(array)-value))
def _get_single_OP(self, d, lb, ub, scale=1., bias=0.):
"""
Returns Operating Point for original input data, on coordinates currently set up, given a scale/bias.
:param scale:
:param bias:
:return: single tuple (x point, y point, unit of x, unit of y)
"""
xnorm = self.std_unit if self.norm_x_axis else 1.
ynorm = self.std_unit if self.norm_y_axis else 1.
auxop = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub, scale=scale, bias=bias)
op = [0.] + [i for i in auxop] # mimic plotdata (first element ignored here)
return (op[self.x_axis_idx] / xnorm, op[self.y_axis_idx] / ynorm, xnorm, ynorm)
def get_OP(self, scale=1., bias=0.):
"""
Returns all Operating Points for original input data, on coordinates currently set up, given a scale/bias.
:param scale:
:param bias:
:return: list of tuples (x point, y point, unit of x, unit of y) or a single tuple if there is only
1 component.
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
op = []
for dc in range(len(self.d)):
op.append(self._get_single_OP(self.d[dc], self.lb[dc], self.ub[dc], scale=scale, bias=bias))
if len(op) < 2:
return op[0]
else:
return op
def plot_UCC(self, titlestr='', syslabel='model', outfn=None, vary_bias=False, markers=None,
xlim=None, ylim=None, **kwargs):
""" Will plot/display the UCC based on current data and coordinates. Multiple curves will be shown
if there are multiple data components (via fit())
:param titlestr: Plot title string
:param syslabel: list of label strings to appear in the plot legend. Can be a single string if there is one component.
:param outfn: base name of an image file to be created (will append .png before creating)
:param vary_bias: True will switch to varying additive bias (default is multiplicative scale)
:param markers: None or a list of marker styles to be used for each curve.
List must be same or longer than number of components.
Markers can be one among these ['o', 's', 'v', 'D', '+'].
:param xlim: tuple or list specifying the range for the x axis, or None (auto)
:param ylim: tuple or list specifying the range for the y axis, or None (auto)
:param `**kwargs`: Additional arguments passed to the main plot call.
:return: list of areas under the curve (or single area, if one data component)
list of operating points (or single op): format of an op is tuple (xaxis value, yaxis value, xunit, yunit)
"""
if self.d is None:
raise ValueError("ERROR(UCC): call fit() prior to using this method.")
if vary_bias and not self.precompute_bias_data:
raise ValueError("ERROR(UCC): Cannot vary bias - instantiated without bias data computation")
if not isinstance(syslabel, list):
syslabel = [syslabel]
assert (len(syslabel) == len(self.d))
assert (markers is None or (isinstance(markers, list) and len(markers) >= len(self.d)))
# main plot of (possibly multiple) datasets
plt.figure()
xnorm = self.std_unit if self.norm_x_axis else 1.
ynorm = self.std_unit if self.norm_y_axis else 1.
op_info = []
auucc = self.get_AUUCC(vary_bias=vary_bias)
auucc = [auucc] if not isinstance(auucc, list) else auucc
for s in range(len(self.d)):
# original operating point
x_op, y_op, x_unit, y_unit = self._get_single_OP(self.d[s], self.lb[s], self.ub[s])
op_info.append((x_op, y_op, x_unit, y_unit))
# display chart
plotdata = self.plotdata_for_scale[s] if not vary_bias else self.plotdata_for_bias[s]
axisX_data = [i[self.x_axis_idx] / xnorm for i in plotdata]
axisY_data = [i[self.y_axis_idx] / ynorm for i in plotdata]
marker = None
if markers is not None: marker = markers[s]
p = plt.plot(axisX_data, axisY_data, label=syslabel[s] + (" (AUC=%.3f)" % auucc[s]), marker=marker, **kwargs)
if s + 1 == len(self.d):
oplab = 'OP'
else:
oplab = None
plt.plot(x_op, y_op, marker='o', color=p[0].get_color(), label=oplab, markerfacecolor='w',
markeredgewidth=1.5, markeredgecolor=p[0].get_color())
axisX_label = self.axes_idx2descr[self.x_axis_idx]
axisY_label = self.axes_idx2descr[self.y_axis_idx]
axisX_units = "(raw)" if np.isclose(xnorm, 1.0) else "[in std deviations]"
axisY_units = "(raw)" if np.isclose(ynorm, 1.0) else "[in std deviations]"
axisX_label += ' ' + axisX_units
axisY_label += ' ' + axisY_units
if ylim is not None:
plt.ylim(ylim)
if xlim is not None:
plt.xlim(xlim)
plt.xlabel(axisX_label)
plt.ylabel(axisY_label)
plt.legend()
plt.title(titlestr)
plt.grid()
if outfn is None:
plt.show()
else:
plt.savefig(outfn)
if len(auucc) < 2:
auucc = auucc[0]
op_info = op_info[0]
return auucc, op_info
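# --- Illustrative sketch (hypothetical, synthetic data): fit a UCC on one
# component and read off the area under the curve and the operating point.
def _example_ucc_usage():
    rng = np.random.RandomState(0)
    gt = rng.randn(300)
    pred = gt + 0.2 * rng.randn(300)
    band = 0.5 * np.ones(300)
    X = np.stack([pred, band, band], axis=1)   # columns: prediction, lower band, upper band
    ucc = UncertaintyCharacteristicsCurve()
    ucc.fit(X, gt)
    return ucc.get_AUUCC(), ucc.get_OP()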
<s> from .uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve
<s> import torch
import torch.nn.functional as F
from uq360.models.noise_models.heteroscedastic_noise_models import GaussianNoise
class GaussianNoiseMLPNet(torch.nn.Module):
def __init__(self, num_features, num_outputs, num_hidden):
super(GaussianNoiseMLPNet, self).__init__()
self.fc = torch.nn.Linear(num_features, num_hidden)
self.fc_mu = torch.nn.Linear(num_hidden, num_outputs)
self.fc_log_var = torch.nn.Linear(num_hidden, num_outputs)
self.noise_layer = GaussianNoise()
def forward(self, x):
x = F.relu(self.fc(x))
mu = self.fc_mu(x)
log_var = self.fc_log_var(x)
return mu, log_var
def loss(self, y_true=None, mu_pred=None, log_var_pred=None):
return self.noise_layer.loss(y_true, mu_pred, log_var_pred, reduce_mean=True)
<s> """
Contains implementations of various utilities used by Horseshoe Bayesian layers
"""
import numpy as np
import torch
from torch.nn import Parameter
td = torch.distributions
gammaln = torch.lgamma
def diag_gaussian_entropy(log_std, D):
return 0.5 * D * (1.0 + np.log(2 * np.pi)) + torch.sum(log_std)
def inv_gamma_entropy(a, b):
return torch.sum(a + torch.log(b) + torch.lgamma(a) - (1 + a) * torch.digamma(a))
def log_normal_entropy(log_std, mu, D):
return torch.sum(log_std + mu + 0.5) + (D / 2) * np.log(2 * np.pi)
class InvGammaHalfCauchyLayer(torch.nn.Module):
"""
Uses the inverse Gamma parameterization of the half-Cauchy distribution.
a ~ C^+(0, b) <==> a^2 ~ IGamma(0.5, 1/lambda), lambda ~ IGamma(0.5, 1/b^2), where lambda is an
auxiliary latent variable.
Uses a factorized variational approximation q(ln a^2)q(lambda) = N(mu, sigma^2) IGamma(ahat, bhat).
This layer places a half Cauchy prior on the scales of each output node of the layer.
"""
def __init__(self, out_features, b):
"""
:param out_features: number of output nodes in the layer.
:param b: scale of the half Cauchy
"""
super(InvGammaHalfCauchyLayer, self).__init__()
self.b = b
self.out_features = out_features
# variational parameters for q(ln a^2)
self.mu = Parameter(torch.FloatTensor(out_features))
self.log_sigma = Parameter(torch.FloatTensor(out_features))
# self.log_sigma = torch.FloatTensor(out_features)
# variational parameters for q(lambda). These will be updated via fixed point updates, hence not parameters.
self.ahat = torch.FloatTensor([1.]) # The posterior parameter is always 1.
self.bhat = torch.ones(out_features) * (1.0 / self.b ** 2)
self.const = torch.FloatTensor([0.5])
self.initialize_from_prior()
def initialize_from_prior(self):
"""
Initializes variational parameters by sampling from the prior.
"""
# sample from half cauchy and log to initialize the mean of the log normal
sample = np.abs(self.b * (np.random.randn(self.out_features) / np.random.randn(self.out_features)))
self.mu.data = torch.FloatTensor(np.log(sample))
self.log_sigma.data = torch.FloatTensor(np.random.randn(self.out_features) - 10.)
def expectation_wrt_prior(self):
"""
Computes E[ln p(a^2 | lambda)] + E[ln p(lambda)]
"""
expected_a_given_lambda = -gammaln(self.const) - 0.5 * (torch.log(self.bhat) - torch.digamma(self.ahat)) + (
-0.5 - 1.) * self.mu - torch.exp(-self.mu + 0.5 * self.log_sigma.exp() ** 2) * (self.ahat / self.bhat)
expected_lambda = -gammaln(self.const) - 2 * 0.5 * np.log(self.b) + (-self.const - 1.) * (
torch.log(self.bhat) - torch.digamma(self.ahat)) - (1. / self.b ** 2) * (self.ahat / self.bhat)
return torch.sum(expected_a_given_lambda) + torch.sum(expected_lambda)
def entropy(self):
"""
Computes entropy of q(ln a^2) and q(lambda)
"""
return self.entropy_lambda() + self.entropy_a2()
def entropy_lambda(self):
return inv_gamma_entropy(self.ahat, self.bhat)
def entropy_a2(self):
return log_normal_entropy(self.log_sigma, self.mu, self.out_features)
def kl(self):
"""
Computes KL(q(ln(a^2)q(lambda) || IG(a^2 | 0.5, 1/lambda) IG(lambda | 0.5, 1/b^2))
"""
return -self.expectation_wrt_prior() - self.entropy()
def fixed_point_updates(self):
# update lambda moments
self.bhat = torch.exp(-self.mu + 0.5 * self.log_sigma.exp() ** 2) + (1. / self.b ** 2)
class InvGammaLayer(torch.nn.Module):
"""
Approximates the posterior of c^2 with prior IGamma(c^2 | a , b)
using a log Normal approximation q(ln c^2) = N(mu, sigma^2)
"""
def __init__(self, a, b, out_features=1):
super(InvGammaLayer, self).__init__()
self.a = torch.FloatTensor([a])
self.b = torch.FloatTensor([b])
# variational parameters for q(ln c^2)
self.mu = Parameter(torch.FloatTensor(out_features))
self.log_sigma = Parameter(torch.FloatTensor(out_features))
self.out_features = out_features
self.initialize_from_prior()
def initialize_from_prior(self):
"""
Initializes variational parameters by sampling from the prior.
"""
self.mu.data = torch.log(self.b / (self.a + 1) * torch.ones(self.out_features)) # initialize at the mode
self.log_sigma.data = torch.FloatTensor(np.random.randn(self.out_features) - 10.)
def expectation_wrt_prior(self):
"""
Computes E[ln p(c^2 | a, b)]
"""
# return self.c_a * np.log(self.c_b) - gammaln(self.c_a) + (
# - self.c_a - 1) * c_mu - self.c_b * Ecinv
return self.a * torch.log(self.b) - gammaln(self.a) + (- self.a - 1) \
* self.mu - self.b * torch.exp(-self.mu + 0.5 * self.log_sigma.exp() ** 2)
def entropy(self):
return log_normal_entropy(self.log_sigma, self.mu, 1)
def kl(self):
"""
Computes KL(q(ln(c^2) || IG(c^2 | a, b))
"""
return -self.expectation_wrt_prior().sum() - self.entropy()
<s> """
Contains implementations of various Bayesian layers
"""
import numpy as np
import torch
import torch.nn.functional as F
from torch.nn import Parameter
from uq360.models.bayesian_neural_networks.layer_utils import InvGammaHalfCauchyLayer, InvGammaLayer
td = torch.distributions
def reparam(mu, logvar, do_sample=True, mc_samples=1):
if do_sample:
std = torch.exp(0.5 * logvar)
eps = torch.FloatTensor(std.size()).normal_()
sample = mu + eps * std
for _ in np.arange(1, mc_samples):
eps = torch.FloatTensor(std.size()).normal_()
sample += mu + eps * std
return sample / mc_samples
else:
return mu
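# --- Illustrative sketch (hypothetical values): draw one Gaussian sample via
# the reparameterization trick used by the layers below.
def _example_reparam():
    mu = torch.zeros(5)
    logvar = torch.log(torch.ones(5) * 0.25)   # variance 0.25, i.e. std 0.5
    return reparam(mu, logvar, do_sample=True)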
class BayesianLinearLayer(torch.nn.Module):
"""
Affine layer with N(0, v/H) or N(0, user specified v) priors on weights and
fully factorized variational Gaussian approximation
"""
def __init__(self, in_features, out_features, cuda=False, init_weight=None, init_bias=None, prior_stdv=None):
super(BayesianLinearLayer, self).__init__()
self.cuda = cuda
self.in_features = in_features
self.out_features = out_features
# weight mean params
self.weights = Parameter(torch.Tensor(out_features, in_features))
self.bias = Parameter(torch.Tensor(out_features))
# weight variance params
self.weights_logvar = Parameter(torch.Tensor(out_features, in_features))
self.bias_logvar = Parameter(torch.Tensor(out_features))
# numerical stability
self.fudge_factor = 1e-8
if not prior_stdv:
# We will use a N(0, 1/num_inputs) prior over weights
self.prior_stdv = torch.FloatTensor([1. / np.sqrt(self.weights.size(1))])
else:
self.prior_stdv = torch.FloatTensor([prior_stdv])
# self.prior_stdv = torch.Tensor([1. / np.sqrt(1e+3)])
self.prior_mean = torch.FloatTensor([0.])
# for Bias use a prior of N(0, 1)
self.prior_bias_stdv = torch.FloatTensor([1.])
self.prior_bias_mean = torch.FloatTensor([0.])
# init params either random or with pretrained net
self.init_parameters(init_weight, init_bias)
def init_parameters(self, init_weight, init_bias):
# init means
if init_weight is not None:
self.weights.data = torch.Tensor(init_weight)
else:
self.weights.data.normal_(0, float(self.prior_stdv.numpy()[0]))
if init_bias is not None:
self.bias.data = torch.Tensor(init_bias)
else:
self.bias.data.normal_(0, 1)
# init variances
self.weights_logvar.data.normal_(-9, 1e-2)
self.bias_logvar.data.normal_(-9, 1e-2)
def forward(self, x, do_sample=True, scale_variances=False):
# local reparameterization trick
mu_activations = F.linear(x, self.weights, self.bias)
var_activations = F.linear(x.pow(2), self.weights_logvar.exp(), self.bias_logvar.exp())
if scale_variances:
activ = reparam(mu_activations, var_activations.log() - np.log(self.in_features), do_sample=do_sample)
else:
activ = reparam(mu_activations, var_activations.log(), do_sample=do_sample)
return activ
def kl(self):
"""
KL divergence (q(W) || p(W))
:return:
"""
weights_logvar = self.weights_logvar
kld_weights = self.prior_stdv.log() - weights_logvar.mul(0.5) + \
(weights_logvar.exp() + (self.weights.pow(2) - self.prior_mean)) / (
2 * self.prior_stdv.pow(2)) - 0.5
kld_bias = self.prior_bias_stdv.log() - self.bias_logvar.mul(0.5) + \
(self.bias_logvar.exp() + (self.bias.pow(2) - self.prior_bias_mean)) / (
2 * self.prior_bias_stdv.pow(2)) \
- 0.5
return kld_weights.sum() + kld_bias.sum()
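# Minimal usage sketch (not part of the original module): a single stochastic
# forward pass through BayesianLinearLayer via the local reparameterization
# trick, plus its KL term. The sizes and toy input are arbitrary.
def _demo_bayesian_linear_layer():
    torch.manual_seed(0)
    layer = BayesianLinearLayer(in_features=4, out_features=2)
    x = torch.randn(8, 4)
    activ = layer(x, do_sample=True)  # stochastic pre-activations, shape (8, 2)
    kl_term = layer.kl()              # scalar KL(q(W) || p(W))
    return activ.shape, kl_term.item()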
class HorseshoeLayer(BayesianLinearLayer):
"""
Uses non-centered parametrization. w_k = v*tau_k*beta_k where k indexes an output unit and w_k and beta_k
are vectors of all weights incident into the unit
"""
def __init__(self, in_features, out_features, cuda=False, scale=1.):
super(HorseshoeLayer, self).__init__(in_features, out_features)
self.cuda = cuda
self.in_features = in_features
self.out_features = out_features
self.nodescales = InvGammaHalfCauchyLayer(out_features=out_features, b=1.)
self.layerscale = InvGammaHalfCauchyLayer(out_features=1, b=scale)
# prior on beta is N(0, I) when employing non centered parameterization
self.prior_stdv = torch.Tensor([1])
self.prior_mean = torch.Tensor([0.])
def forward(self, x, do_sample=True, debug=False, eps_scale=None, eps_w=None):
# At a particular unit k, preactivation_sample = scale_sample * pre_activation_sample
# sample scales
scale_mean = 0.5 * (self.nodescales.mu + self.layerscale.mu)
scale_var = 0.25 * (self.nodescales.log_sigma.exp() ** 2 + self.layerscale.log_sigma.exp() ** 2)
scale_sample = reparam(scale_mean, scale_var.log(), do_sample=do_sample).exp()
# sample preactivations
mu_activations = F.linear(x, self.weights, self.bias)
var_activations = F.linear(x.pow(2), self.weights_logvar.exp(), self.bias_logvar.exp())
activ_sample = reparam(mu_activations, var_activations.log(), do_sample=do_sample)
return scale_sample * activ_sample
def kl(self):
return super(HorseshoeLayer, self).kl() + self.nodescales.kl() + self.layerscale.kl()
def fixed_point_updates(self):
self.nodescales.fixed_point_updates()
self.layerscale.fixed_point_updates()
class RegularizedHorseshoeLayer(HorseshoeLayer):
"""
Uses the regularized Horseshoe distribution. The regularized Horseshoe soft thresholds the tails of the Horseshoe.
For all weights w_k incident upon node k in the layer we have:
w_k ~ N(0, (tau_k * v)^2 I) N(0, c^2 I), c^2 ~ InverseGamma(c_a, b).
c^2 controls the scale of the thresholding. As c^2 -> infinity, the regularized Horseshoe -> Horseshoe.
"""
def __init__(self, in_features, out_features, cuda=False, scale=1., c_a=2., c_b=6.):
super(RegularizedHorseshoeLayer, self).__init__(in_features, out_features, cuda=cuda, scale=scale)
self.c = InvGammaLayer(a=c_a, b=c_b)
def forward(self, x, do_sample=True, **kwargs):
# At a particular unit k, preactivation_sample = scale_sample * pre_activation_sample
# sample regularized scales
scale_mean = self.nodescales.mu + self.layerscale.mu
scale_var = self.nodescales.log_sigma.exp() ** 2 + self.layerscale.log_sigma.exp() ** 2
scale_sample = reparam(scale_mean, scale_var.log(), do_sample=do_sample).exp()
c_sample = reparam(self.c.mu, 2 * self.c.log_sigma, do_sample=do_sample).exp()
regularized_scale_sample = (c_sample * scale_sample) / (c_sample + scale_sample)
# sample preactivations
mu_activations = F.linear(x, self.weights, self.bias)
var_activations = F.linear(x.pow(2), self.weights_logvar.exp(), self.bias_logvar.exp())
activ_sample = reparam(mu_activations, var_activations.log(), do_sample=do_sample)
return torch.sqrt(regularized_scale_sample) * activ_sample
def kl(self):
return super(RegularizedHorseshoeLayer, self).kl() + self.c.kl()
class NodeSpecificRegularizedHorseshoeLayer(RegularizedHorseshoeLayer):
"""
Uses the regularized Horseshoe distribution. The regularized Horseshoe soft thresholds the tails of the Horseshoe.
For all weights w_k incident upon node k in the layer we have:
w_k ~ N(0, (tau_k * v)^2 I) N(0, c_k^2 I), c_k^2 ~ InverseGamma(a, b).
c_k^2 controls the scale of the thresholding. As c_k^2 -> infinity, the regularized Horseshoe -> Horseshoe
Note that we now have a per-node c_k.
"""
def __init__(self, in_features, out_features, cuda=False, scale=1., c_a=2., c_b=6.):
super(NodeSpecificRegularizedHorseshoeLayer, self).__init__(in_features, out_features, cuda=cuda, scale=scale)
self.c = InvGammaLayer(a=c_a, b=c_b, out_features=out_features)
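# Minimal usage sketch (not part of the original module): forward pass, KL and
# fixed point updates for a HorseshoeLayer. It assumes the InvGammaHalfCauchyLayer
# utilities imported above expose mu, log_sigma, kl() and fixed_point_updates().
def _demo_horseshoe_layer():
    torch.manual_seed(0)
    layer = HorseshoeLayer(in_features=4, out_features=3, scale=1e-1)
    x = torch.randn(8, 4)
    out = layer(x, do_sample=True)  # scale sample times pre-activation sample, shape (8, 3)
    layer.fixed_point_updates()     # refresh the auxiliary inverse-Gamma parameters
    return out.shape, layer.kl().item()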
<s> import numpy as np
import torch
from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseFixedPrecision
def compute_test_ll(y_test, y_pred_samples, std_y=1.):
"""
Computes test log likelihoods = (1 / Ntest) * \\sum_n p(y_n | x_n, D_train)
:param y_test: True y
:param y_pred_samples: y^s = f(x_test, w^s); w^s ~ q(w). S x Ntest, where S is the number of samples
q(w) is either a trained variational posterior or an MCMC approximation to p(w | D_train)
:param std_y: True std of y (assumed known)
"""
S, _ = y_pred_samples.shape
noise = GaussianNoiseFixedPrecision(std_y=std_y)
    ll = -noise.loss(y_pred=y_pred_samples, y_true=y_test.unsqueeze(dim=0), reduce_sum=False)  # per-sample log-likelihoods
ll = torch.logsumexp(ll, dim=0) - np.log(S) # mean over num samples
return torch.mean(ll) # mean over test points
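# Illustrative re-derivation (not part of the original module) of the quantity
# compute_test_ll estimates: log((1/S) * sum_s N(y | f_s, std_y^2)) per test
# point, evaluated with logsumexp over S posterior prediction samples.
def _demo_test_ll(S=10, n_test=4, std_y=1.0):
    torch.manual_seed(0)
    y_test = torch.randn(n_test)
    y_pred_samples = y_test.unsqueeze(0) + 0.1 * torch.randn(S, n_test)
    log_lik = (-0.5 * np.log(2 * np.pi * std_y ** 2)
               - 0.5 * (y_pred_samples - y_test.unsqueeze(0)) ** 2 / std_y ** 2)
    per_point_ll = torch.logsumexp(log_lik, dim=0) - np.log(S)
    return per_point_ll.mean()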
<s> from abc import ABC
import torch
from torch import nn
from uq360.models.bayesian_neural_networks.layers import BayesianLinearLayer
from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseGammaPrecision
import numpy as np
td = torch.distributions
class BayesianNN(nn.Module, ABC):
"""
Bayesian neural network with zero mean Gaussian priors over weights.
"""
def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50,
activation_type='relu', num_layers=1):
super(BayesianNN, self).__init__()
self.num_layers = num_layers
if activation_type == 'relu':
# activation
self.activation = nn.ReLU()
elif activation_type == 'tanh':
self.activation = nn.Tanh()
else:
print("Activation Type not supported")
self.fc_hidden = []
self.fc1 = layer(ip_dim, num_nodes,)
for _ in np.arange(self.num_layers - 1):
self.fc_hidden.append(layer(num_nodes, num_nodes, ))
self.fc_out = layer(num_nodes, op_dim, )
self.noise_layer = None
def forward(self, x, do_sample=True):
x = self.fc1(x, do_sample=do_sample)
x = self.activation(x)
for layer in self.fc_hidden:
x = layer(x, do_sample=do_sample)
x = self.activation(x)
return self.fc_out(x, do_sample=do_sample, scale_variances=True)
def kl_divergence_w(self):
kld = self.fc1.kl() + self.fc_out.kl()
for layer in self.fc_hidden:
kld += layer.kl()
return kld
def prior_predictive_samples(self, n_sample=100):
n_eval = 1000
x = torch.linspace(-2, 2, n_eval)[:, np.newaxis]
y = np.zeros([n_sample, n_eval])
for i in np.arange(n_sample):
y[i] = self.forward(x).data.numpy().ravel()
return x.data.numpy(), y
### get and set weights ###
def get_weights(self):
assert len(self.fc_hidden) == 0 # only works for one layer networks.
weight_dict = {}
weight_dict['layerip_means'] = torch.cat([self.fc1.weights, self.fc1.bias.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerip_logvar'] = torch.cat([self.fc1.weights_logvar, self.fc1.bias_logvar.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerop_means'] = torch.cat([self.fc_out.weights, self.fc_out.bias.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerop_logvar'] = torch.cat([self.fc_out.weights_logvar, self.fc_out.bias_logvar.unsqueeze(1)], dim=1).data.numpy()
return weight_dict
def set_weights(self, weight_dict):
assert len(self.fc_hidden) == 0 # only works for one layer networks.
to_param = lambda x: nn.Parameter(torch.Tensor(x))
        self.fc1.weights = to_param(weight_dict['layerip_means'][:, :-1])
        self.fc1.weights_logvar = to_param(weight_dict['layerip_logvar'][:, :-1])
        self.fc1.bias = to_param(weight_dict['layerip_means'][:, -1])
        self.fc1.bias_logvar = to_param(weight_dict['layerip_logvar'][:, -1])
        self.fc_out.weights = to_param(weight_dict['layerop_means'][:, :-1])
        self.fc_out.weights_logvar = to_param(weight_dict['layerop_logvar'][:, :-1])
        self.fc_out.bias = to_param(weight_dict['layerop_means'][:, -1])
        self.fc_out.bias_logvar = to_param(weight_dict['layerop_logvar'][:, -1])
class BayesianRegressionNet(BayesianNN, ABC):
"""
Bayesian neural net with N(y_true | f(x, w), \\lambda^-1); \\lambda ~ Gamma(a, b) likelihoods.
"""
def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu',
num_layers=1):
super(BayesianRegressionNet, self).__init__(layer=layer, ip_dim=ip_dim, op_dim=op_dim,
num_nodes=num_nodes, activation_type=activation_type,
num_layers=num_layers,
)
self.noise_layer = GaussianNoiseGammaPrecision(a0=6., b0=6.)
def likelihood(self, x=None, y=None):
out = self.forward(x)
return -self.noise_layer.loss(y_pred=out, y_true=y)
def neg_elbo(self, num_batches, x=None, y=None):
        # scale the KL terms by the number of batches so that the minibatch elbo is an unbiased estimate of the true elbo.
Elik = self.likelihood(x, y)
neg_elbo = (self.kl_divergence_w() + self.noise_layer.kl()) / num_batches - Elik
return neg_elbo
def mse(self, x, y):
"""
scaled rmse (scaled by 1 / std_y**2)
"""
E_noise_precision = 1. / self.noise_layer.get_noise_var()
return (0.5 * E_noise_precision * (self.forward(x, do_sample=False) - y)**2).sum()
def get_noise_var(self):
return self.noise_layer.get_noise_var()
class BayesianClassificationNet(BayesianNN, ABC):
"""
Bayesian neural net with Categorical(y_true | f(x, w)) likelihoods. Use for classification.
"""
def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu',
num_layers=1):
super(BayesianClassificationNet, self).__init__(layer=layer, ip_dim=ip_dim, op_dim=op_dim,
num_nodes=num_nodes, activation_type=activation_type,
num_layers=num_layers)
self.noise_layer = torch.nn.CrossEntropyLoss(reduction='sum')
def likelihood(self, x=None, y=None):
out = self.forward(x)
return -self.noise_layer(out, y)
def neg_elbo(self, num_batches, x=None, y=None):
        # scale the KL terms by the number of batches so that the minibatch elbo is an unbiased estimate of the true elbo.
Elik = self.likelihood(x, y)
neg_elbo = self.kl_divergence_w() / num_batches - Elik
return neg_elbo
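# Minimal training-loop sketch (not part of the original module): fit a
# BayesianRegressionNet on synthetic data by minimizing the negative ELBO.
# The data, learning rate and epoch count are illustrative only.
def _demo_train_bayesian_regression(n_epochs=5):
    torch.manual_seed(0)
    x = torch.linspace(-1, 1, 64).unsqueeze(1)
    y = torch.sin(3 * x) + 0.1 * torch.randn_like(x)
    net = BayesianRegressionNet(ip_dim=1, op_dim=1, num_nodes=20, num_layers=1)
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-2)
    for _ in range(n_epochs):
        optimizer.zero_grad()
        loss = net.neg_elbo(num_batches=1, x=x, y=y)  # KL terms plus negative expected log-likelihood
        loss.backward()
        optimizer.step()
    return loss.item()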
<s><s> from abc import ABC
import numpy as np
import torch
from torch import nn
from uq360.models.bayesian_neural_networks.layers import HorseshoeLayer, BayesianLinearLayer, RegularizedHorseshoeLayer
from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseGammaPrecision
import numpy as np
td = torch.distributions
class HshoeBNN(nn.Module, ABC):
"""
Bayesian neural network with Horseshoe layers.
"""
def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu', num_layers=1,
hshoe_scale=1e-1, use_reg_hshoe=False):
if use_reg_hshoe:
layer = RegularizedHorseshoeLayer
else:
layer = HorseshoeLayer
super(HshoeBNN, self).__init__()
self.num_layers = num_layers
if activation_type == 'relu':
# activation
self.activation = nn.ReLU()
elif activation_type == 'tanh':
self.activation = nn.Tanh()
else:
print("Activation Type not supported")
self.fc_hidden = []
self.fc1 = layer(ip_dim, num_nodes, scale=hshoe_scale)
for _ in np.arange(self.num_layers - 1):
self.fc_hidden.append(layer(num_nodes, num_nodes))
self.fc_out = BayesianLinearLayer(num_nodes, op_dim)
self.noise_layer = None
def forward(self, x, do_sample=True):
x = self.fc1(x, do_sample=do_sample)
x = self.activation(x)
for layer in self.fc_hidden:
x = layer(x, do_sample=do_sample)
x = self.activation(x)
return self.fc_out(x, do_sample=do_sample, scale_variances=True)
def kl_divergence_w(self):
kld = self.fc1.kl() + self.fc_out.kl()
for layer in self.fc_hidden:
kld += layer.kl()
return kld
def fixed_point_updates(self):
if hasattr(self.fc1, 'fixed_point_updates'):
self.fc1.fixed_point_updates()
        if hasattr(self.fc_out, 'fixed_point_updates'):
self.fc_out.fixed_point_updates()
for layer in self.fc_hidden:
if hasattr(layer, 'fixed_point_updates'):
layer.fixed_point_updates()
def prior_predictive_samples(self, n_sample=100):
n_eval = 1000
x = torch.linspace(-2, 2, n_eval)[:, np.newaxis]
y = np.zeros([n_sample, n_eval])
for i in np.arange(n_sample):
y[i] = self.forward(x).data.numpy().ravel()
return x.data.numpy(), y
### get and set weights ###
def get_weights(self):
assert len(self.fc_hidden) == 0 # only works for one layer networks.
weight_dict = {}
weight_dict['layerip_means'] = torch.cat([self.fc1.weights, self.fc1.bias.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerip_logvar'] = torch.cat([self.fc1.weights_logvar, self.fc1.bias_logvar.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerop_means'] = torch.cat([self.fc_out.weights, self.fc_out.bias.unsqueeze(1)], dim=1).data.numpy()
weight_dict['layerop_logvar'] = torch.cat([self.fc_out.weights_logvar, self.fc_out.bias_logvar.unsqueeze(1)], dim=1).data.numpy()
return weight_dict
def set_weights(self, weight_dict):
assert len(self.fc_hidden) == 0 # only works for one layer networks.
to_param = lambda x: nn.Parameter(torch.Tensor(x))
        self.fc1.weights = to_param(weight_dict['layerip_means'][:, :-1])
        self.fc1.weights_logvar = to_param(weight_dict['layerip_logvar'][:, :-1])
        self.fc1.bias = to_param(weight_dict['layerip_means'][:, -1])
        self.fc1.bias_logvar = to_param(weight_dict['layerip_logvar'][:, -1])
        self.fc_out.weights = to_param(weight_dict['layerop_means'][:, :-1])
        self.fc_out.weights_logvar = to_param(weight_dict['layerop_logvar'][:, :-1])
        self.fc_out.bias = to_param(weight_dict['layerop_means'][:, -1])
        self.fc_out.bias_logvar = to_param(weight_dict['layerop_logvar'][:, -1])
class HshoeRegressionNet(HshoeBNN, ABC):
"""
Horseshoe net with N(y_true | f(x, w), \\lambda^-1); \\lambda ~ Gamma(a, b) likelihoods.
"""
def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu',
num_layers=1, hshoe_scale=1e-5, use_reg_hshoe=False):
super(HshoeRegressionNet, self).__init__(ip_dim=ip_dim, op_dim=op_dim,
num_nodes=num_nodes, activation_type=activation_type,
num_layers=num_layers,
hshoe_scale=hshoe_scale,
use_reg_hshoe=use_reg_hshoe)
self.noise_layer = GaussianNoiseGammaPrecision(a0=6., b0=6.)
def likelihood(self, x=None, y=None):
out = self.forward(x)
return -self.noise_layer.loss(y_pred=out, y_true=y)
def neg_elbo(self, num_batches, x=None, y=None):
        # scale the KL terms by the number of batches so that the minibatch elbo is an unbiased estimate of the true elbo.
Elik = self.likelihood(x, y)
neg_elbo = (self.kl_divergence_w() + self.noise_layer.kl()) / num_batches - Elik
return neg_elbo
def mse(self, x, y):
"""
scaled rmse (scaled by 1 / std_y**2)
"""
E_noise_precision = 1. / self.noise_layer.get_noise_var()
return (0.5 * E_noise_precision * (self.forward(x, do_sample=False) - y)**2).sum()
def get_noise_var(self):
return self.noise_layer.get_noise_var()
class HshoeClassificationNet(HshoeBNN, ABC):
"""
Horseshoe net with Categorical(y_true | f(x, w)) likelihoods. Use for classification.
"""
def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu',
num_layers=1, hshoe_scale=1e-5, use_reg_hshoe=False):
super(HshoeClassificationNet, self).__init__(ip_dim=ip_dim, op_dim=op_dim,
num_nodes=num_nodes, activation_type=activation_type,
num_layers=num_layers,
hshoe_scale=hshoe_scale,
use_reg_hshoe=use_reg_hshoe)
self.noise_layer = torch.nn.CrossEntropyLoss(reduction='sum')
def likelihood(self, x=None, y=None):
out = self.forward(x)
return -self.noise_layer(out, y)
def neg_elbo(self, num_batches, x=None, y=None):
        # scale the KL terms by the number of batches so that the minibatch elbo is an unbiased estimate of the true elbo.
Elik = self.likelihood(x, y)
neg_elbo = (self.kl_divergence_w()) / num_batches - Elik
return neg_elbo
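# Minimal training-loop sketch (not part of the original module): fit a
# HshoeRegressionNet on synthetic data, running the fixed point updates for the
# Horseshoe scale parameters after each gradient step. It assumes the layer_utils
# classes used by HorseshoeLayer implement kl() and fixed_point_updates().
def _demo_train_hshoe_regression(n_epochs=5):
    torch.manual_seed(0)
    x = torch.linspace(-1, 1, 64).unsqueeze(1)
    y = torch.sin(3 * x) + 0.1 * torch.randn_like(x)
    net = HshoeRegressionNet(ip_dim=1, op_dim=1, num_nodes=20, num_layers=1)
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-2)
    for _ in range(n_epochs):
        optimizer.zero_grad()
        loss = net.neg_elbo(num_batches=1, x=x, y=y)
        loss.backward()
        optimizer.step()
        net.fixed_point_updates()
    return loss.item()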
<s> import abc
import sys
# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta(str('ABC'), (), {})
class AbstractNoiseModel(ABC):
""" Abstract class. All noise models inherit from here.
"""
def __init__(self, *argv, **kwargs):
""" Initialize an AbstractNoiseModel object.
"""
@abc.abstractmethod
def loss(self, *argv, **kwargs):
""" Compute loss given predictions and groundtruth labels
"""
raise NotImplementedError
@abc.abstractmethod
def get_noise_var(self, *argv, **kwargs):
"""
Return the current estimate of noise variance
"""
raise NotImplementedError
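# Minimal sketch (not part of the original module) of how a concrete noise model
# is expected to implement this interface; the fixed variance is illustrative.
class _ConstantVarianceNoise(AbstractNoiseModel):
    """Toy noise model with a fixed, known variance."""
    def __init__(self, var=1.0):
        super(_ConstantVarianceNoise, self).__init__()
        self.var = var
    def loss(self, y_pred=None, y_true=None):
        # squared error scaled by the (fixed) precision
        return ((y_pred - y_true) ** 2 / (2.0 * self.var)).sum()
    def get_noise_var(self):
        return self.var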
<s> import math
import numpy as np
import torch
from scipy.special import gammaln
from uq360.models.noise_models.noisemodel import AbstractNoiseModel
from torch.nn import Parameter
td = torch.distributions
def transform(a):
return torch.log(1 + torch.exp(a))
class GaussianNoise(torch.nn.Module, AbstractNoiseModel):
"""
N(y_true | f_\\mu(x, w), f_\\sigma^2(x, w))
"""
def __init__(self, cuda=False):
super(GaussianNoise, self).__init__()
self.cuda = cuda
self.const = torch.log(torch.FloatTensor([2 * math.pi]))
def loss(self, y_true=None, mu_pred=None, log_var_pred=None, reduce_mean=True):
"""
computes -1 * ln N (y_true | mu_pred, softplus(log_var_pred))
:param y_true:
:param mu_pred:
:param log_var_pred:
:return:
"""
var_pred = transform(log_var_pred)
ll = -0.5 * self.const - 0.5 * torch.log(var_pred) - 0.5 * (1. / var_pred) * ((mu_pred - y_true) ** 2)
if reduce_mean:
return -ll.mean(dim=0)
else:
return -ll.sum(dim=0)
def get_noise_var(self, log_var_pred):
return transform(log_var_pred)
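# Minimal usage sketch (not part of the original module): evaluate the
# heteroscedastic negative log-likelihood on toy predictions. With a zero
# log-variance input, softplus gives a predicted variance of log(2) ~ 0.69.
def _demo_gaussian_noise_loss():
    noise = GaussianNoise()
    y_true = torch.tensor([[0.0], [1.0]])
    mu_pred = torch.tensor([[0.1], [0.9]])
    log_var_pred = torch.zeros_like(mu_pred)
    return noise.loss(y_true=y_true, mu_pred=mu_pred, log_var_pred=log_var_pred)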
<s><s> import math
import numpy as np
import torch
from scipy.special import gammaln
from uq360.models.noise_models.noisemodel import AbstractNoiseModel
from torch.nn import Parameter
td = torch.distributions
def transform(a):
return torch.log(1 + torch.exp(a))
class GaussianNoiseGammaPrecision(torch.nn.Module, AbstractNoiseModel):
"""
N(y_true | f(x, w), \\lambda^-1); \\lambda ~ Gamma(a, b).
Uses a variational approximation; q(lambda) = Gamma(ahat, bhat)
"""
def __init__(self, a0=6, b0=6, cuda=False):
super(GaussianNoiseGammaPrecision, self).__init__()
self.cuda = cuda
self.a0 = a0
self.b0 = b0
self.const = torch.log(torch.FloatTensor([2 * math.pi]))
# variational parameters
self.ahat = Parameter(torch.FloatTensor([10.]))
self.bhat = Parameter(torch.FloatTensor([3.]))
def loss(self, y_pred=None, y_true=None):
"""
computes -1 * E_q(\\lambda)[ln N (y_pred | y_true, \\lambda^-1)], where q(lambda) = Gamma(ahat, bhat)
:param y_pred:
:param y_true:
:return:
"""
n = y_pred.shape[0]
ahat = transform(self.ahat)
bhat = transform(self.bhat)
return -1 * (-0.5 * n * self.const + 0.5 * n * (torch.digamma(ahat) - torch.log(bhat)) \\
- 0.5 * (ahat/bhat) * ((y_pred - y_true) ** 2).sum())
def kl(self):
ahat = transform(self.ahat)
bhat = transform(self.bhat)
return (ahat - self.a0) * torch.digamma(ahat) - torch.lgamma(ahat) + gammaln(self.a0) + \\
self.a0 * (torch.log(bhat) - np.log(self.b0)) + ahat * (self.b0 - bhat) / bhat
def get_noise_var(self):
ahat = transform(self.ahat)
bhat = transform(self.bhat)
return (bhat / ahat).data.numpy()[0]
class GaussianNoiseFixedPrecision(torch.nn.Module, AbstractNoiseModel):
"""
N(y_true | f(x, w), sigma_y**2); known sigma_y
"""
def __init__(self, std_y=1., cuda=False):
super(GaussianNoiseFixedPrecision, self).__init__()
self.cuda = cuda
self.const = torch.log(torch.FloatTensor([2 * math.pi]))
self.sigma_y = std_y
    def loss(self, y_pred=None, y_true=None, reduce_sum=True):
"""
computes -1 * ln N (y_pred | y_true, sigma_y**2)
:param y_pred:
:param y_true:
:return:
"""
ll = -0.5 * self.const - np.log(self.sigma_y) - 0.5 * (1. / self.sigma_y ** 2) * ((y_pred - y_true) ** 2)
        if reduce_sum:
            return -ll.sum(dim=0)
        return -ll
def get_noise_var(self):
        return self.sigma_y ** 2
<s><s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import numpy as np
import logging
import warnings
from sklearn.ensemble import VotingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Ridge
from sklearn.preprocessing import binarize
from sklearn.ensemble import VotingRegressor
from sklearn.svm import SVC
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from learner.aion_matrix import aion_matrix
warnings.filterwarnings('always')
class ensemble_voting():
def __init__(self,ensemble_params,scoreParam):
self.ensemble_params = ensemble_params
self.scoreParam=scoreParam
self.final_estimator_r=''
self.final_estimator_c=''
self.log = logging.getLogger('eion')
    ''' Read the aion config "Ensemble-Voting" and parse each algorithm and its params whose "enable" flag is "True". Not used now. '''
def getSelected_algs_params(self,problemType,ensembleType,ensembleConfig):
from learner.parameters import parametersDefine
paramObj=parametersDefine()
ensClass_algs_params={}
# algs_status={}
for key,val in ensembleConfig.items():
for s,p in val.items():
if (s == "enable" and p == "True"):
params = val['param']
                    params_eval = paramObj.paramDefine(params,None)
params_eval = {param_key: param_value[0] for param_key, param_value in params_eval.items()}
ensClass_algs_params[key]=params_eval
else:
pass
return ensClass_algs_params
    ''' Build the list of voting classifier algorithms from the user config. Not used now; if needed in future, use this the same way as the bagging ensemble. '''
def listEnsembleClassVotingAlgs(self,ensClass_algs_params):
ensembleVotingClassList=list()
for key,val in ensClass_algs_params.items():
if (key == 'Logistic Regression'):
lr=LogisticRegression()
lr=lr.set_params(**val)
ensembleVotingClassList.append(lr)
elif (key == 'Support Vector Machine'):
svm=SVC()
svm=svm.set_params(**val)
ensembleVotingClassList.append(svm)
elif (key == 'Naive Bayes'):
nb=GaussianNB()
nb=nb.set_params(**val)
ensembleVotingClassList.append(nb)
elif (key == 'K Nearest Neighbors'):
knn=KNeighborsClassifier()
knn=knn.set_params(**val)
ensembleVotingClassList.append(knn)
elif (key == 'Decision Tree'):
dt=DecisionTreeClassifier()
dt=dt.set_params(**val)
ensembleVotingClassList.append(dt)
elif (key == 'Random Forest'):
rf=RandomForestClassifier()
rf=rf.set_params(**val)
ensembleVotingClassList.append(rf)
else:
                ## Algorithm not found in config, so the algorithm list is left empty. If needed, populate it with default algorithms.
ensembleVotingClassList=[]
pass
return ensembleVotingClassList
    ''' Build the list of voting regression algorithms from the user config. Not used now; if needed in future, use this the same way as the bagging ensemble. '''
def listEnsembleRegVotingAlgs(self,ensReg_algs_params):
ensembleVotingRegList=list()
for key,val in ensReg_algs_params.items():
if (key == 'Linear Regression'):
lir=LinearRegression()
lir=lir.set_params(**val)
ensembleVotingRegList.append(lir)
elif (key == 'Decision Tree'):
dtr=DecisionTreeRegressor()
dtr=dtr.set_params(**val)
ensembleVotingRegList.append(dtr)
elif (key == 'Ridge'):
ridge=Ridge()
ridge=ridge.set_params(**val)
ensembleVotingRegList.append(ridge)
else:
                ## Algorithm not found in config, so the algorithm list is left empty. If needed, populate it with default algorithms.
ensembleVotingRegList=[]
return ensembleVotingRegList
def ensemble_voting_classifier(self,X_train,y_train, X_test, y_test,MakeFP0,MakeFN0,modelList):
#bug 12437
status='ERROR'
model=None
estimator=None
score=None
params=None
threshold = -1
precisionscore =-1
recallscore = -1
objClf = aion_matrix()
try:
lr = LogisticRegression(solver='lbfgs',random_state=1,max_iter=200)
rf = RandomForestClassifier(random_state=1)
gnb = GaussianNB()
svc = SVC(probability=True) #Need to keep probability=True, because cross_val_score,predict_proba fn calls
knn=KNeighborsClassifier(n_neighbors=5)
base_estimators = []
if 'Logistic Regression' in modelList:
base_estimators.append(('LogisticRegression', lr))
self.log.info('-------- Ensemble: Logistic Regression-------')
if 'Random Forest' in modelList:
base_estimators.append(('RandomForestClassifier', rf))
self.log.info('-------- Ensemble: Random Forest-------')
if 'Naive Bayes' in modelList:
base_estimators.append(('GaussianNB', gnb))
self.log.info('-------- Ensemble: Naive Bayes-------')
if 'Support Vector Machine' in modelList:
self.log.info('-------- Ensemble: Support Vector Machine-------')
base_estimators.append(('SVC', svc))
if 'K Nearest Neighbors' in modelList:
base_estimators.append(('KNeighborsClassifier', knn))
self.log.info('-------- Ensemble: K Nearest Neighbors-------')
if len(base_estimators) == 0:
self.log.info('-------- Ensemble Voting is only supported for Logistic Regression, Random Forest Classifier, Naive Bayes, SVM and KNN -------')
status = "UNSUPPORTED"
return status, estimator,params,score,model,threshold,precisionscore,recallscore
eclf1 = VotingClassifier(base_estimators, voting='soft')
eclf1.fit(X_train, y_train)
y_predict = eclf1.predict(X_test)
score = objClf.get_score(self.scoreParam,y_test,y_predict)
self.log.info('-------- Ensemble (VoteClassifier) Soft Score:'+str(score))
if MakeFP0:
self.log.info('-------- Ensemble: Calculate Threshold for FP Start-------')
startRange = 0.0
endRange = 1.0
stepsize = 0.01
threshold_range = np.arange(startRange,endRange,stepsize)
threshold,precisionscore,recallscore = objClf.check_threshold(eclf1,X_train,y_train,threshold_range,'FP','')
self.log.info('-------- Calculate Threshold for FP End-------')
elif MakeFN0:
self.log.info('-------- Ensemble: Calculate Threshold for FN Start-------')
startRange = 1.0
endRange = 0.0
stepsize = -0.01
threshold_range = np.arange(startRange,endRange,stepsize)
threshold,precisionscore,recallscore = objClf.check_threshold(eclf1,X_train,y_train,threshold_range,'FN','')
self.log.info('-------- Calculate Threshold for FN End-------')
if threshold != -1:
predictedData = eclf1.predict_proba(X_test)
predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold) #bug 12437
score = objClf.get_score(self.scoreParam,y_test,predictedData)
status = 'SUCCESS'
model =eclf1.__class__.__name__
estimator=eclf1
params = estimator.get_params()
#bug 12437 - Removed ensemble hard voting as predict_proba in the later stages will break
except Exception as Inst: #bug 12437
self.log.info('--------- Error in Ensemble Voting ---------\\n')
self.log.info(str(Inst))
return status,estimator,params,score,model,threshold,precisionscore,recallscore
def ensemble_voting__regressor(self,X_train,y_train, X_test, y_test,modelList):
scoredetails = ''
vr_predict=None
vr_model=None
try:
lr = LinearRegression()
rfr = RandomForestRegressor(n_estimators=10, random_state=1)
dtr=DecisionTreeRegressor()
base_estimators = []
if 'Linear Regression' in modelList:
base_estimators.append(('LinearRegression', lr))
if 'Decision Tree' in modelList:
base_estimators.append(('DecisionTreeRegressor', dtr))
if 'Random Forest' in modelList:
base_estimators.append(('RandomForestRegressor', rfr))
if len(base_estimators) == 0:
base_estimators = [('LinearRegression', lr), ('RandomForestRegressor', rfr),('DecisionTreeRegressor', dtr)]
voting_reg = VotingRegressor(base_estimators)
vr_model=voting_reg.fit(X_train,y_train)
vr_predict=voting_reg.predict(X_test)
best_vr_alg=voting_reg.__class__.__name__
self.log.info('-----------> Voting regression Model '+str(best_vr_alg))
except Exception as e:
self.log.info("voting regression Exception info: \\n")
self.log.info(e)
aion_matrixobj = aion_matrix()
score = aion_matrixobj.get_score(self.scoreParam,y_test,vr_predict)
return voting_reg,voting_reg.get_params(),score,best_vr_alg
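# Illustrative sketch (not part of the original class): soft voting followed by
# probability thresholding with binarize, mirroring the FP/FN threshold handling
# in ensemble_voting_classifier above. The synthetic data and the 0.7 threshold
# are made up for the example.
def _demo_soft_voting_with_threshold():
    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split
    X, y = make_classification(n_samples=200, n_features=8, random_state=0)
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
    clf = VotingClassifier([('LogisticRegression', LogisticRegression(max_iter=200)),
                            ('RandomForestClassifier', RandomForestClassifier(random_state=0))],
                           voting='soft')
    clf.fit(X_tr, y_tr)
    proba = clf.predict_proba(X_te)[:, 1].reshape(-1, 1)
    # raise the decision threshold from the default 0.5 to trade recall for precision
    y_hat = binarize(proba, threshold=0.7)
    return y_hat.ravel()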
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import time
import os
import sys
import numpy as np
import pandas as pd
from sklearn import model_selection
from sklearn.model_selection import train_test_split, KFold, cross_val_score
#Classification metrics lib
import logging
import warnings
warnings.filterwarnings('always') # "error", "ignore", "always", "default", "module" or "once"
from learner.aion_matrix import aion_matrix
from sklearn.preprocessing import binarize
class ensemble_bagging():
def __init__(self,ensemble_params,scoreParam,MakeFP0,MakeFN0):
self.ensemble_params = ensemble_params
self.scoreParam=scoreParam
self.MakeFP0 = MakeFP0
self.MakeFN0 = MakeFN0
self.log = logging.getLogger('eion')
def add_alg2dict(self,k,v):
b_dict={}
b_dict[k]=v
return b_dict
def getSelected_algs_params(self,problemType,ensembleType,ensembleConfig):
from learner.parameters import parametersDefine
paramObj=parametersDefine()
ensClass_algs_params={}
algs_status={}
for key,val in ensembleConfig.items():
for s,p in val.items():
if (s == "enable" and p == "True"):
params = val['param']
params_eval = paramObj.paramDefine(params,None)
params_eval = {param_key: param_value[0] for param_key, param_value in params_eval.items()}
ensClass_algs_params[key]=params_eval
else:
pass
return ensClass_algs_params
def listEnsembleClassBaggingAlgs(self,ensClass_algs_params):
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier, ExtraTreesClassifier, RandomForestClassifier
ensembleBaggingClassList=list()
for key,val in ensClass_algs_params.items():
if (key == 'Logistic Regression'):
lr=LogisticRegression()
lr=lr.set_params(**val)
ensembleBaggingClassList.append(lr)
elif (key == 'Support Vector Machine'):
svm=SVC()
svm=svm.set_params(**val)
ensembleBaggingClassList.append(svm)
elif (key == 'Naive Bayes'):
nb=GaussianNB()
nb=nb.set_params(**val)
ensembleBaggingClassList.append(nb)
elif (key == 'K Nearest Neighbors'):
knn=KNeighborsClassifier()
knn=knn.set_params(**val)
ensembleBaggingClassList.append(knn)
elif (key == 'Decision Tree'):
dt=DecisionTreeClassifier()
dt=dt.set_params(**val)
ensembleBaggingClassList.append(dt)
elif (key == 'Random Forest'):
rf=RandomForestClassifier()
rf=rf.set_params(**val)
ensembleBaggingClassList.append(rf)
else:
pass
return ensembleBaggingClassList
def listEnsembleRegBaggingAlgs(self,ensReg_algs_params):
from sklearn.linear_model import Ridge
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
ensembleBaggingRegList=list()
        for key,val in ensReg_algs_params.items():
if (key == 'Linear Regression'):
lir=LinearRegression()
lir=lir.set_params(**val)
ensembleBaggingRegList.append(lir)
elif (key == 'Decision Tree'):
dtr=DecisionTreeRegressor()
dtr=dtr.set_params(**val)
ensembleBaggingRegList.append(dtr)
elif (key == 'Ridge'):
ridge=Ridge()
ridge=ridge.set_params(**val)
ensembleBaggingRegList.append(ridge)
else:
ensembleBaggingRegList=[]
return ensembleBaggingRegList
def ensemble_bagging_classifier(self,X_train,y_train, X_test, y_test):
## New changes
from sklearn.ensemble import BaggingClassifier, ExtraTreesClassifier, RandomForestClassifier
ensemble_method = "Bagging_classifier"
problemType='classification'
ensembleType='bagging'
model_dict=self.ensemble_params
ensClass_algs_params = self.getSelected_algs_params(problemType,ensembleType,model_dict)
ensembleBaggingList = self.listEnsembleClassBaggingAlgs(ensClass_algs_params)
# clf_array = model_list
clf_array=ensembleBaggingList
# no. of base classifier
num_trees = len(clf_array)
# max_samples=float(max_samples)
n_estimators = num_trees
# random_state=seed
bagging_mean={}
bagging_std={}
accuracy_basealgs_train={}
accuracy_basealgs_test={}
blable=""
accuracy_score_test=0
kfold = model_selection.KFold(n_splits=10, random_state=None)
bestScore=-0xFFFF
scoredetails = ''
threshold = -1
bestthreshold = -1
precisionscore =-1
bestprecisionscore=-1
recallscore = -1
bestrecallscore=-1
objClf = aion_matrix()
if (ensemble_method == "Bagging_classifier"):
            # Bagging ensemble over each configured base classifier (e.g. KNeighborsClassifier), with each bag trained on a random subset of the training samples.
for clf in clf_array:
self.log.info('-----------> Ensemble Algorithm '+str(clf.__class__.__name__))
clf.fit(X_train, y_train)
bagging_clf = BaggingClassifier(clf,n_estimators = num_trees, random_state=10)
bagging_clf.fit(X_train, y_train)
bagging_scores = cross_val_score(bagging_clf, X_train, y_train, cv=kfold,n_jobs=-1)
#bagging_ensemble_t=bagging_clf.fit(X_train, y_train)
if not X_test.empty:
bag_predict=bagging_clf.predict(X_test)
accuracy_score_test = objClf.get_score(self.scoreParam,y_test,bag_predict)
else:
accuracy_score_test = bagging_scores
MakeFP0 = False
MakeFN0 = False
if self.MakeFP0:
self.log.info('-------- Ensemble: Calculate Threshold for FP Start-------')
startRange = 0.0
endRange = 1.0
stepsize = 0.01
threshold_range = np.arange(startRange,endRange,stepsize)
threshold,precisionscore,recallscore = objClf.check_threshold(bagging_clf,X_train,y_train,threshold_range,'FP','')
MakeFP0 = True
self.log.info('-------- Calculate Threshold for FP End-------')
if self.MakeFN0:
self.log.info('-------- Ensemble: Calculate Threshold for FN Start-------')
startRange = 1.0
endRange = 0.0
stepsize = -0.01
threshold_range = np.arange(startRange,endRange,stepsize)
threshold,precisionscore,recallscore = objClf.check_threshold(bagging_clf,X_train,y_train,threshold_range,'FN','')
MakeFN0 = True
self.log.info('-------- Calculate Threshold for FN End-------')
if threshold != -1:
if not X_test.empty:
predictedData = bagging_clf.predict_proba(X_test)
predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold) #bug 12437
accuracy_score_test = objClf.get_score(self.scoreParam,y_test,predictedData)
status,bscore,bthres,brscore,bpscore = objClf.getBestModel(MakeFP0,MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,accuracy_score_test,bestScore)
if status:
bestScore =bscore
bestModel =bagging_clf.__class__.__name__
bestEstimator=bagging_clf
bestthreshold = bthres
bestBaseModel = clf.__class__.__name__
bestrecallscore = brscore
bestprecisionscore = bpscore
else:
pass
best_alg_name=bestEstimator.__class__.__name__
self.log.info('-----------> Best Bagging Classifier Model '+str(bestBaseModel))
self.log.info('-----------> Best Score '+str(bestScore))
# self.log.info('-----------> Threshold '+str(bestthreshold)) #bug 12438
if bestthreshold != -1:
if not X_test.empty:
predictedData_test = bestEstimator.predict_proba(X_test)
predictedData_test = binarize(predictedData_test[:,1].reshape(-1, 1),threshold=bestthreshold) #bug 12437
predictedData_train = bestEstimator.predict_proba(X_train)
predictedData_train = binarize(predictedData_train[:,1].reshape(-1, 1),threshold=bestthreshold) #bug 12437
else:
if not X_test.empty:
predictedData_test = bestEstimator.predict(X_test)
predictedData_train = bestEstimator.predict(X_train)
return bestEstimator,bestEstimator.get_params(),bestScore,best_alg_name,bestthreshold,bestprecisionscore,bestrecallscore
def ensemble_bagging__regressor(self,X_train,y_train, X_test, y_test):
from sklearn.ensemble import BaggingRegressor
ensemble_method='Bagging_regressor'
problemType='regression'
ensembleType='bagging'
model_dict=self.ensemble_params
ensReg_algs_params = self.getSelected_algs_params(problemType,ensembleType,model_dict)
ensembleBaggingList = self.listEnsembleRegBaggingAlgs(ensReg_algs_params)
scoredetails = ''
aion_matrixobj = aion_matrix()
reg_array = ensembleBaggingList
num_trees = len(reg_array)
#self.log.info(num_trees)
# max_samples=float(max_samples)
n_estimators = num_trees
r_state=10
bestModel=''
bestParams={}
bestScore=-sys.float_info.max #extension of bugfix 11656
objClf = aion_matrix()
for reg in reg_array:
self.log.info('-----------> Ensemble Algorithm '+str(reg.__class__.__name__))
nmodel=reg.fit(X_train, y_train)
model = reg.__class__.__name__
estimator = BaggingRegressor(base_estimator=reg, random_state=r_state)
bagging_ensemble_t=estimator.fit(X_train, y_train)
predictedData = estimator.predict(X_test)
score = objClf.get_score(self.scoreParam,y_test,predictedData)
if self.scoreParam == "r2":
if score > bestScore:
bestScore =score
bestModel =model
bestEstimator=estimator
else:
if abs(score) < bestScore or bestScore == -sys.float_info.max: #extension of bugfix 11656
bestScore =abs(score)
bestModel =model
bestEstimator=estimator
best_alg_name=bestEstimator.__class__.__name__
self.log.info('-----------> Best Ensemble Algorithm '+str(bestModel))
return bestEstimator,bestEstimator.get_params(),bestScore,best_alg_name
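# Minimal sketch (not part of the original class): wrap a single base estimator
# in a BaggingClassifier and score it with 10-fold cross validation, the same
# pattern used inside ensemble_bagging_classifier. The data is synthetic.
def _demo_bagging_cross_val():
    from sklearn.datasets import make_classification
    from sklearn.ensemble import BaggingClassifier
    from sklearn.tree import DecisionTreeClassifier
    X, y = make_classification(n_samples=300, n_features=10, random_state=1)
    bagging_clf = BaggingClassifier(DecisionTreeClassifier(), n_estimators=10, random_state=10)
    kfold = model_selection.KFold(n_splits=10)
    scores = cross_val_score(bagging_clf, X, y, cv=kfold, n_jobs=-1)
    return scores.mean()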
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import numpy as np
#Classification metrics lib
import logging
import warnings
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import binarize
from sklearn.svm import SVC
from sklearn.ensemble import StackingClassifier
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import StackingRegressor
from sklearn.svm import LinearSVR
from sklearn.linear_model import RidgeCV
from sklearn.linear_model import LassoCV
from learner.aion_matrix import aion_matrix
warnings.filterwarnings('always') # "error", "ignore", "always", "default", "module" or "once"
class ensemble_stacking():
def __init__(self,ensemble_params,scoreParam):
self.ensemble_params = ensemble_params
self.scoreParam=scoreParam
self.final_estimator_r=''
self.final_estimator_c=''
self.log = logging.getLogger('eion')
    ## Read the aion config "Ensemble-Stacking" and parse each algorithm and its params whose "enable" flag is "True".
def getSelected_algs_params(self,problemType,ensembleType,ensembleConfig):
from learner.parameters import parametersDefine
paramObj=parametersDefine()
ensClass_algs_params={}
# algs_status={}
for key,val in ensembleConfig.items():
for s,p in val.items():
if (s == "enable" and p == "True"):
params = val['param']
params_eval = paramObj.paramDefine(params,None)
params_eval = {param_key: param_value[0] for param_key, param_value in params_eval.items()}
ensClass_algs_params[key]=params_eval
else:
pass
return ensClass_algs_params
    ## Build the list of stacking classifier base algorithms from the user config. Not used now; if needed in future, use this the same way as the bagging ensemble.
def listEnsembleClassStackingAlgs(self,ensClass_algs_params):
ensembleBaggingClassList=list()
for key,val in ensClass_algs_params.items():
# print(key)
if (key == 'Logistic Regression'):
lr=LogisticRegression()
lr=lr.set_params(**val)
ensembleBaggingClassList.append(lr)
elif (key == 'Support Vector Machine'):
svm=SVC()
svm=svm.set_params(**val)
ensembleBaggingClassList.append(svm)
elif (key == 'Naive Bayes'):
nb=GaussianNB()
nb=nb.set_params(**val)
ensembleBaggingClassList.append(nb)
elif (key == 'K Nearest Neighbors'):
knn=KNeighborsClassifier()
knn=knn.set_params(**val)
ensembleBaggingClassList.append(knn)
elif (key == 'Decision Tree'):
dt=DecisionTreeClassifier()
dt=dt.set_params(**val)
ensembleBaggingClassList.append(dt)
elif (key == 'Random Forest'):
rf=RandomForestClassifier()
rf=rf.set_params(**val)
ensembleBaggingClassList.append(rf)
else:
ensembleBaggingClassList=[]
pass
return ensembleBaggingClassList
    ## Build the list of stacking regression base algorithms from the user config. Not used now; if needed in future, use this the same way as the bagging ensemble.
def listEnsembleRegStackingAlgs(self,ensReg_algs_params):
ensembleBaggingRegList=list()
for key,val in ensReg_algs_params.items():
if (key == 'LinearSVR'):
lir=LinearSVR()
lir=lir.set_params(**val)
ensembleBaggingRegList.append(lir)
elif (key == 'LinearRegression'):
lr=LinearRegression()
lr=lr.set_params(**val)
ensembleBaggingRegList.append(lr)
elif (key == 'LassoCV'):
lcv=LassoCV()
lcv=lcv.set_params(**val)
ensembleBaggingRegList.append(lcv)
elif (key == 'RandomForestRegressor'):
rfr=RandomForestRegressor()
rfr=rfr.set_params(**val)
ensembleBaggingRegList.append(rfr)
elif (key == 'RidgeCV'):
ridge=RidgeCV()
ridge=ridge.set_params(**val)
ensembleBaggingRegList.append(ridge)
else:
                ## No algorithms found in the configuration settings; the list is left empty. A default algorithm could be added here instead.
ensembleBaggingRegList=[]
return ensembleBaggingRegList
def extract_params(self,dict):
self.dict=dict
for k,v in self.dict.items():
return k,v
def stacking_params(self):
        for k,v in self.ensemble_params.items():
try:
if (k == "max_features_percentage"):
max_features_percentage=float(v)
elif (k == "max_samples"):
max_samples=float(v)
elif (k == "seed"):
seed=int(v)
elif (k == "final_estimator_stack_c"):
final_estimator_c=str(v)
elif (k == "final_estimator_stack_r"):
final_estimator_r=str(v)
else:
self.log.info("Invalid Param in ensemble advanced configuration.\\n")
except Exception as e:
self.log.info("\\n Ensemble config param parsing error"+str(e))
continue
return final_estimator_c,final_estimator_r,seed,max_samples,max_features_percentage
def ensemble_stacking_classifier(self,X_train,y_train, X_test, y_test,MakeFP0,MakeFN0,modelList):
final_estimator_c,final_estimator_r,seed,max_samples,max_features_percentage= self.stacking_params()
final_estimator_c=""
final_estimator=final_estimator_c
scoredetails=''
lr = LogisticRegression(solver='lbfgs',random_state=1,max_iter=200)
rf = RandomForestClassifier(random_state=2)
gnb = GaussianNB()
svc = SVC(probability=True) #Need to keep probability=True, because of cross_val_score,predict_proba fn calls
knn=KNeighborsClassifier(n_neighbors=5)
try:
if (final_estimator == 'LogisticRegression'):
final_estimator_a=lr
elif (final_estimator == 'RandomForestClassifier'):
final_estimator_a=rf
elif (final_estimator == 'GaussianNB'):
final_estimator_a=gnb
elif (final_estimator == 'SVC'):
final_estimator_a=svc
elif (final_estimator == 'KNeighborsClassifier'):
final_estimator_a=knn
else:
final_estimator_a=lr
except Exception as e:
final_estimator_a=lr
self.log.info("Given stacking regression final estimator algorithm issue, using default one (LogisticRegression) as final_estimator now.\\n")
self.log.info(e)
#stacking estimators
base_estimators = []
if 'Logistic Regression' in modelList:
base_estimators.append(('LogisticRegression', lr))
if 'Random Forest' in modelList:
base_estimators.append(('RandomForestClassifier', rf))
if 'Naive Bayes' in modelList:
base_estimators.append(('GaussianNB', gnb))
if 'Support Vector Machine' in modelList:
base_estimators.append(('SVC', svc))
if 'K Nearest Neighbors' in modelList:
base_estimators.append(('KNeighborsClassifier', knn))
if len(base_estimators) == 0:
base_estimators = [('LogisticRegression', lr),('RandomForestClassifier', rf),('GaussianNB', gnb),('SVC', svc),('KNeighborsClassifier', knn)]
stacking_c = StackingClassifier(estimators=base_estimators, final_estimator=final_estimator_a)
stacking_c.fit(X_train, y_train)
y_predict=stacking_c.predict(X_test)
objClf = aion_matrix()
accuracy_score_test = objClf.get_score(self.scoreParam,y_test,y_predict)
MakeFP0 = False
MakeFN0 = False
threshold = -1
recallscore = -1
precisionscore =-1
if MakeFP0:
self.log.info('-------- Ensemble: Calculate Threshold for FP Start-------')
startRange = 0.0
endRange = 1.0
stepsize = 0.01
threshold_range = np.arange(startRange,endRange,stepsize)
threshold,precisionscore,recallscore = objClf.check_threshold(stacking_c,X_train,y_train,threshold_range,'FP','')
MakeFP0 = True
self.log.info('-------- Calculate Threshold for FP End-------')
elif MakeFN0:
self.log.info('-------- Ensemble: Calculate Threshold for FN Start-------')
startRange = 1.0
endRange = 0.0
stepsize = -0.01
threshold_range = np.arange(startRange,endRange,stepsize)
threshold,precisionscore,recallscore = objClf.check_threshold(stacking_c,X_train,y_train,threshold_range,'FN','')
MakeFN0 = True
self.log.info('-------- Calculate Threshold for FN End-------')
if threshold != -1:
predictedData = stacking_c.predict_proba(X_test)
predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold) #bug 12437
accuracy_score_test = objClf.get_score(self.scoreParam,y_test,predictedData)
best_alg_stacking=stacking_c.__class__.__name__
self.log.info('-----------> Best Stacking Classifier Model '+str(best_alg_stacking))
self.log.info('-----------> Best Score '+str(accuracy_score_test))
return stacking_c,stacking_c.get_params(),accuracy_score_test,best_alg_stacking,threshold,precisionscore,recallscore
def ensemble_stacking__regressor(self,X_train,y_train, X_test, y_test,modelList):
final_estimator_c,final_estimator_r,seed,max_samples,max_features_percentage= self.stacking_params()
final_estimator=final_estimator_r
final_estimator_a=None
scoredetails=''
lr=LinearRegression()
rcv=RidgeCV()
svr=LinearSVR()
lcv=LassoCV()
rf=RandomForestRegressor(random_state=42)
try:
if (final_estimator == 'LinearRegression'):
final_estimator_a=lr
if (final_estimator == 'RidgeCV'):
final_estimator_a=rcv
elif (final_estimator == 'LinearSVR'):
final_estimator_a=svr
elif (final_estimator == 'LassoCV'):
final_estimator_a=lcv
elif (final_estimator == 'RandomForestRegressor'):
final_estimator_a=rf
else:
#default is RidgeCV
final_estimator_a=rcv
except Exception as e:
self.log.info("stacking regression Exception info: \\n")
self.log.info(e)
final_estimator_a=rcv
base_estimators = []
if 'Linear Regression' in modelList:
base_estimators.append(('LinearRegression', lr))
if 'Ridge' in modelList:
base_estimators.append(('RidgeCV', rcv))
if 'LinearSVR' in modelList:
base_estimators.append(('LinearSVR', svr))
if 'Lasso' in modelList:
base_estimators.append(('LassoCV', lcv))
if 'Random Forest' in modelList:
base_estimators.append(('RandomForestRegressor', rf))
if len(base_estimators) == 0:
base_estimators = [('LinearRegression', lr),('RidgeCV', rcv),('LinearSVR', svr),('LassoCV', lcv),('RandomForestRegressor', rf)]
self.log.info("Stacking Base Alogs :"+str(base_estimators))
self.log.info("Final Estimator :"+final_estimator)
stacking_regressor = StackingRegressor(estimators=base_estimators,final_estimator=final_estimator_a)
stacking_r_model=stacking_regressor.fit(X_train, y_train)
stacking_rpredict=stacking_regressor.predict(X_test)
best_stacking_alg=stacking_regressor.__class__.__name__
#Accuracy
accuracy_score_best=stacking_regressor.score(X_test, y_test)
aion_matrixobj = aion_matrix()
score = aion_matrixobj.get_score(self.scoreParam,y_test,stacking_rpredict)
return stacking_regressor,stacking_regressor.get_params(),score,best_stacking_alg
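# Minimal sketch (not part of the original class): a StackingRegressor with the
# same style of named base estimators and a RidgeCV final estimator, as built by
# ensemble_stacking__regressor above. The synthetic data is illustrative only.
def _demo_stacking_regressor():
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=200, n_features=6, noise=0.1, random_state=0)
    base_estimators = [('LinearRegression', LinearRegression()),
                       ('RandomForestRegressor', RandomForestRegressor(random_state=42))]
    reg = StackingRegressor(estimators=base_estimators, final_estimator=RidgeCV())
    reg.fit(X, y)
    return reg.predict(X[:5])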
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import numpy as np
import pandas as pd
import talos
from talos import Evaluate
import json
import sys
import time
import os
import tensorflow.keras.utils as kutils
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Dropout,LSTM,GRU,SimpleRNN,Flatten,Input
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Conv1D,MaxPooling1D
from sklearn.model_selection import KFold
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error,make_scorer
from sklearn.metrics import mean_squared_error
import logging
import tensorflow as tf
import tensorflow.keras.backend as K
def rmse_m(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
def r_square(y_true, y_pred):
SS_res = K.sum(K.square(y_true-y_pred))
SS_tot = K.sum(K.square(y_true-K.mean(y_true)))
return (1 - SS_res/(SS_tot+K.epsilon()))
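# Illustrative check (not part of the original module) of the custom Keras
# metrics above on small constant tensors. Note that rmse_m averages over the
# last axis, so for a single output it yields one value per sample.
def _demo_custom_metrics():
    y_true = K.constant([[1.0], [2.0], [3.0]])
    y_pred = K.constant([[1.5], [2.0], [2.5]])
    rmse_vals = K.eval(rmse_m(y_true, y_pred))  # per-sample values, shape (3,)
    r2_val = K.eval(r_square(y_true, y_pred))   # scalar
    return rmse_vals, r2_val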
class DLRegressionModel:
def __init__(self,modelList, modelParams, scoreParam, cvSplit, featuresData,
targetData,testX,testY, method,randomMethod,roundLimit,best_feature_model):
self.modelList =modelList
self.modelParams =modelParams
self.scoreParam = scoreParam
self.cvSplit =cvSplit
self.featuresData =featuresData
self.targetData = targetData
self.testX = testX
self.testY = testY
self.method =method
#self.logFile = logFile
self.randomMethod=randomMethod
self.roundLimit=roundLimit
self.log = logging.getLogger('eion')
self.best_feature_model = best_feature_model
def RNNRegression(self,x_train,y_train,x_val,y_val,params):
tf.keras.backend.clear_session()
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_val = np.reshape(x_val, (x_val.shape[0], x_val.shape[1], 1))
model = Sequential()
if params['RNNType'] == "LSTM" :
if params['numRNNLayers'] > 1:
model.add(LSTM(params['first_neuron'],return_sequences=True,input_shape=(x_train.shape[1],1)))
for x in range(1,params['numRNNLayers']):
model.add(LSTM(params['first_neuron']))
else:
model.add(LSTM(params['first_neuron'],input_shape=(x_train.shape[1],1)))
elif params['RNNType'] == "GRU" :
if params['numRNNLayers'] > 1:
model.add(GRU(params['first_neuron'],return_sequences=True,input_shape=(x_train.shape[1],1)))
for x in range(1,params['numRNNLayers']):
model.add(GRU(params['first_neuron']))
else:
model.add(GRU(params['first_neuron'],input_shape=(x_train.shape[1],1)))
elif params['RNNType'] == "SimpleRNN" :
if params['numRNNLayers'] > 1:
model.add(SimpleRNN(params['first_neuron'],return_sequences=True,input_shape=(x_train.shape[1],1)))
for x in range(1,params['numRNNLayers']):
model.add(SimpleRNN(params['first_neuron']))
else:
model.add(SimpleRNN(params['first_neuron'],input_shape=(x_train.shape[1],1)))
talos.utils.hidden_layers(model, params, 1)
model.add(Dense(1,activation=params['activation']))
model.compile(loss=params['losses'],optimizer=params['optimizer'],metrics=['mae','mse',rmse_m,r_square])
out = model.fit(x_train, y_train, validation_data=(x_val, y_val), batch_size=params['batch_size'],
epochs=params['epochs'],verbose=0,shuffle=True)
return out, model
def SNNRegression(self,x_train,y_train,x_val,y_val,params):
tf.keras.backend.clear_session()
model = Sequential()
model.add(Dense(params['first_neuron'],input_dim=x_train.shape[1],activation=params['activation']))
talos.utils.hidden_layers(model, params, 1)
model.add(Dense(1, activation=params['activation']))
        model.compile(loss=params['losses'], optimizer=params['optimizer'], metrics=['mae','mse',rmse_m,r_square])
out = model.fit(x=x_train,
y=y_train,
validation_data=(x_val, y_val),
epochs=params['epochs'],
batch_size=params['batch_size'],
verbose=0)
return out, model
def CNNRegression(self,x_train,y_train,x_val,y_val,params):
tf.keras.backend.clear_session()
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
self.log.info(x_train.shape)
x_val = np.reshape(x_val, (x_val.shape[0], x_val.shape[1], 1))
model = Sequential()
self.log.info(params['kernel_size'])
model.add(Conv1D(filters=params['first_neuron'], kernel_size=int(params['kernel_size']), activation=params['activation'], input_shape=(x_train.shape[1],1)) )
if params['numConvLayers'] > 1:
for x in range(1,params['numConvLayers']):
if params['MaxPool'] == "True":
model.add(MaxPooling1D(pool_size=2))
model.add(Conv1D(filters=8, kernel_size=int(params['kernel_size']), activation=params['activation']))
talos.utils.hidden_layers(model, params, 1)
model.add(Flatten())
model.add(Dense(1))
model.compile(loss=params['losses'],optimizer=params['optimizer'],metrics=['mae','mse',rmse_m,r_square])
out = model.fit(x_train, y_train, validation_data=(x_val, y_val), batch_size=params['batch_size'],
epochs=params['epochs'],verbose=0,shuffle=True)
return out, model
def TalosScan(self,modelObj):
try:
#dataPath = pd.read_csv(self.dataLocation)
#X = dataPath.drop(self.targetData, axis=1)
X = self.featuresData
x = X.values
loss_matrix = 'mean_absolute_error'
optimizer='Nadam'
Y= self.targetData
y = Y.values
XSNN = X.values
X1 = np.expand_dims(X, axis=2)
scoredetails = ''
kf = KFold(n_splits = self.cvSplit)
for train_index, test_index in kf.split(X):
X_train, X_test = x[train_index], x[test_index]
y_train, y_test = y[train_index], y[test_index]
data = self.modelParams
models = data.keys()
lstart = time.time()
scoreSNN = []
scoreRNN = []
scoreCNN = []
scoreRNNGRU = []
scoreRNNLSTM = []
best_paramsSNN = {}
best_paramsRNN = {}
best_paramsRNNGRU = {}
best_paramsRNNLSTM = {}
best_paramsCNN = {}
if "Neural Network"in self.modelList:
self.log.info("-------> Model Name: Neural Network")
start = time.time()
data = self.modelParams["Neural Network"]
p = {"activation":data["activation"].split(","),
"optimizer":data["optimizer"].split(","),
"losses":data["losses"].split(","),
"first_neuron":[int(n) for n in data["first_layer"].split(",")],
"shapes": data["shapes"].split(","),
"hidden_layers":[int(n) for n in data["hidden_layers"].split(",")],
"dropout": [float(n) for n in data["dropout"].split(",")],
"lr": [float(n) for n in data["learning_rate"].split(",")],
"batch_size": [int(n) for n in data["batch_size"].split(",")],
"epochs": [int(n) for n in data["epochs"].split(",")]}
scan_object = talos.Scan(x=X_train,y=y_train,x_val = X_test,y_val = y_test,model = modelObj.SNNRegression,experiment_name='SNN',params=p,round_limit=self.roundLimit,random_method=self.randomMethod)
matrix_type = 'val_loss'
if self.scoreParam.lower() == 'rmse':
matrix_type = 'val_rmse_m'
elif(self.scoreParam.lower() == 'r2'):
matrix_type = 'val_r_square'
elif(self.scoreParam.lower() == 'mae'):
matrix_type = 'val_mae'
elif(self.scoreParam.lower() == 'mse'):
matrix_type = 'val_mse'
analyze_objectSNN = talos.Analyze(scan_object)
highValAccSNN = analyze_objectSNN.low(matrix_type)
dfSNN = analyze_objectSNN.data
newdfSNN = dfSNN.loc[dfSNN[matrix_type] == highValAccSNN]
best_paramsSNN["activation"] = list(newdfSNN["activation"])[0]
best_paramsSNN["optimizer"] = list(newdfSNN["optimizer"])[0]
best_paramsSNN["losses"] = list(newdfSNN["losses"])[0]
best_paramsSNN["first_layer"] = list(newdfSNN["first_neuron"])[0]
best_paramsSNN["shapes"] = list(newdfSNN["shapes"])[0]
best_paramsSNN["hidden_layers"] = list(newdfSNN["hidden_layers"])[0]
best_paramsSNN["dropout"] = list(newdfSNN["dropout"])[0]
best_paramsSNN["batch_size"] = list(newdfSNN["batch_size"])[0]
best_paramsSNN["epochs"] = list(newdfSNN["epochs"])[0]
best_paramsSNN["lr"] = list(newdfSNN["lr"])[0]
best_modelSNN = scan_object.best_model(metric=matrix_type, asc=True)
loss_matrix = best_paramsSNN["losses"]
optimizer = best_paramsSNN["optimizer"]
batchsize = best_paramsSNN["batch_size"]
if self.scoreParam == 'rmse':
best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[rmse_m])
elif self.scoreParam == 'r2':
best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[r_square])
elif self.scoreParam == 'mae':
best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mae'])
else:
best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mse'])
scoreSNN = best_modelSNN.evaluate(XSNN,Y, batch_size=batchsize)
self.log.info("----------> Score Matrix: "+str(best_modelSNN.metrics_names))
self.log.info("----------> Score: "+str(scoreSNN))
self.log.info("----------> Model Params: "+str(best_paramsSNN))
executionTime=time.time() - start
self.log.info('----------> SNN Execution Time: '+str(executionTime)+'\\n')
XSNN = self.testX.values
predictedData = best_modelSNN.predict(XSNN)
if self.scoreParam.lower() == 'mse':
score = mean_squared_error(self.testY,predictedData)
elif self.scoreParam.lower() == 'rmse':
score=mean_squared_error(self.testY,predictedData,squared=False)
elif self.scoreParam.lower() == 'mae':
score=mean_absolute_error(self.testY,predictedData)
elif self.scoreParam.lower() == 'r2':
score=r2_score(self.testY,predictedData)
else:
score = scoreSNN[1]
self.log.info("----------> Testing Score: "+str(score))
scoreSNN[1] = score
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"Neural Network","FeatureEngineering":"'+str(self.best_feature_model)+'","Score":'+str(scoreSNN[1])+'}'
self.log.info('Status:- |... DL Algorithm applied: Neural Network')
self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2)))
if "Recurrent Neural Network"in self.modelList:
self.log.info("-------> Model Name: Recurrent Neural Network")
start = time.time()
data = self.modelParams["Recurrent Neural Network"]
p = {"RNNType":["SimpleRNN"],
"numRNNLayers":[int(n) for n in data["numRNNLayers"].split(",")],
"activation":data["activation"].split(","),
"optimizer":data["optimizer"].split(","),
"losses":data["losses"].split(","),
"first_neuron":[int(n) for n in data["first_layer"].split(",")],
"shapes": data["shapes"].split(","),
"hidden_layers":[int(n) for n in data["hidden_layers"].split(",")],
"dropout": [float(n) for n in data["dropout"].split(",")],
"lr": [float(n) for n in data["learning_rate"].split(",")],
"batch_size": [int(n) for n in data["batch_size"].split(",")],
"epochs": [int(n) for n in data["epochs"].split(",")]}
scan_object = talos.Scan(x=X_train,y=y_train,x_val = X_test,y_val = y_test,model = modelObj.RNNRegression,experiment_name='RNN',params=p,round_limit=self.roundLimit,random_method=self.randomMethod)
matrix_type = 'val_loss'
if self.scoreParam.lower() == 'rmse':
matrix_type = 'val_rmse_m'
elif(self.scoreParam.lower() == 'r2'):
matrix_type = 'val_r_square'
elif(self.scoreParam.lower() == 'mae'):
matrix_type = 'val_mae'
elif(self.scoreParam.lower() == 'mse'):
matrix_type = 'val_mse'
analyze_objectRNN = talos.Analyze(scan_object)
highValAccRNN = analyze_objectRNN.low(matrix_type)
dfRNN = analyze_objectRNN.data
newdfRNN = dfRNN.loc[dfRNN[matrix_type] == highValAccRNN]
best_paramsRNN["RNNType"] = "SimpleRNN"
best_paramsRNN["numRNNLayers"] = list(newdfRNN["numRNNLayers"])[0]
best_paramsRNN["activation"] = list(newdfRNN["activation"])[0]
best_paramsRNN["optimizer"] = list(newdfRNN["optimizer"])[0]
best_paramsRNN["losses"] = list(newdfRNN["losses"])[0]
best_paramsRNN["first_layer"] = list(newdfRNN["first_neuron"])[0]
best_paramsRNN["shapes"] = list(newdfRNN["shapes"])[0]
best_paramsRNN["hidden_layers"] = list(newdfRNN["hidden_layers"])[0]
best_paramsRNN["dropout"] = list(newdfRNN["dropout"])[0]
best_paramsRNN["batch_size"] = list(newdfRNN["batch_size"])[0]
best_paramsRNN["epochs"] = list(newdfRNN["epochs"])[0]
best_paramsRNN["lr"] = list(newdfRNN["lr"])[0]
best_modelRNN = scan_object.best_model(metric=matrix_type, asc=True)
loss_matrix = best_paramsRNN["losses"]
optimizer = best_paramsRNN["optimizer"]
batchsize = best_paramsRNN["batch_size"]
if self.scoreParam == 'rmse':
best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[rmse_m])
elif self.scoreParam == 'r2':
best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[r_square])
elif self.scoreParam == 'mae':
best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mae'])
else:
best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mse'])
scoreRNN = best_modelRNN.evaluate(X1,Y, batch_size=batchsize)
self.log.info("----------> Score Matrix: "+str(best_modelRNN.metrics_names))
self.log.info("----------> Score: "+str(scoreRNN))
self.log.info("----------> Model Params: "+str(best_paramsRNN))
executionTime=time.time() - start
self.log.info('----------> RNN Execution Time: '+str(executionTime)+'\\n')
XSNN = np.expand_dims(self.testX, axis=2)
predictedData = best_modelRNN.predict(XSNN)
if self.scoreParam.lower() == 'mse':
score = mean_squared_error(self.testY,predictedData)
elif self.scoreParam.lower() == 'rmse':
score=mean_squared_error(self.testY,predictedData,squared=False)
elif self.scoreParam.lower() == 'mae':
score=mean_absolute_error(self.testY,predictedData)
elif self.scoreParam.lower() == 'r2':
score=r2_score(self.testY,predictedData)
else:
score = scoreRNN[1]
self.log.info("----------> Testing Score: "+str(score))
scoreRNN[1] = score
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"Recurrent Neural Network","FeatureEngineering":"'+str(self.best_feature_model)+'","Score":'+str(scoreRNN[1])+'}'
self.log.info('Status:- |... DL Algorithm applied: Recurrent Neural Network')
self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2)))
if "Recurrent Neural Network (GRU)"in self.modelList:
self.log.info("-------> Model Name: Recurrent Neural Network (GRU)")
start = time.time()
data = self.modelParams["Recurrent Neural Network (GRU)"]
p = {"RNNType":["GRU"],
"numRNNLayers":[int(n) for n in data["numRNNLayers"].split(",")],
"activation":data["activation"].split(","),
"optimizer":data["optimizer"].split(","),
"losses":data["losses"].split(","),
"first_neuron":[int(n) for n in data["first_layer"].split(",")],
"shapes": data["shapes"].split(","),
"hidden_layers":[int(n) for n in data["hidden_layers"].split(",")],
"dropout": [float(n) for n in data["dropout"].split(",")],
"lr": [float(n) for n in data["learning_rate"].split(",")],
"batch_size": [int(n) for n in data["batch_size"].split(",")],
"epochs": [int(n) for n in data["epochs"].split(",")]}
scan_object = talos.Scan(x=X_train,y=y_train,x_val = X_test,y_val = y_test,model = modelObj.RNNRegression,experiment_name='RNNGRU',params=p,round_limit=self.roundLimit,random_method=self.randomMethod)
matrix_type = 'val_loss'
if self.scoreParam.lower() == 'rmse':
matrix_type = 'val_rmse_m'
elif(self.scoreParam.lower() == 'r2'):
matrix_type = 'val_r_square'
elif(self.scoreParam.lower() == 'mae'):
matrix_type = 'val_mae'
elif(self.scoreParam.lower() == 'mse'):
matrix_type = 'val_mse'
analyze_objectRNNGRU = talos.Analyze(scan_object)
highValAccRNNGRU = analyze_objectRNNGRU.low(matrix_type)
dfRNNGRU = analyze_objectRNNGRU.data
newdfRNNGRU = dfRNNGRU.loc[dfRNNGRU[matrix_type] == highValAccRNNGRU]
best_paramsRNNGRU["RNNType"] = "GRU"
best_paramsRNNGRU["numRNNLayers"] = list(newdfRNNGRU["numRNNLayers"])[0]
best_paramsRNNGRU["activation"] = list(newdfRNNGRU["activation"])[0]
best_paramsRNNGRU["optimizer"] = list(newdfRNNGRU["optimizer"])[0]
best_paramsRNNGRU["losses"] = list(newdfRNNGRU["losses"])[0]
best_paramsRNNGRU["first_layer"] = list(newdfRNNGRU["first_neuron"])[0]
best_paramsRNNGRU["shapes"] = list(newdfRNNGRU["shapes"])[0]
best_paramsRNNGRU["hidden_layers"] = list(newdfRNNGRU["hidden_layers"])[0]
best_paramsRNNGRU["dropout"] = list(newdfRNNGRU["dropout"])[0]
best_paramsRNNGRU["batch_size"] = list(newdfRNNGRU["batch_size"])[0]
best_paramsRNNGRU["epochs"] = list(newdfRNNGRU["epochs"])[0]
best_paramsRNNGRU["lr"] = list(newdfRNNGRU["lr"])[0]
best_modelRNNGRU = scan_object.best_model(metric=matrix_type, asc=True)
loss_matrix = best_paramsRNNGRU["losses"]
optimizer = best_paramsRNNGRU["optimizer"]
batchsize = best_paramsRNNGRU["batch_size"]
if self.scoreParam == 'rmse':
best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=[rmse_m])
elif self.scoreParam == 'r2':
best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=[r_square])
elif self.scoreParam == 'mae':
best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mae'])
else:
best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mse'])
scoreRNNGRU = best_modelRNNGRU.evaluate(X1,Y, batch_size=batchsize)
self.log.info("----------> Score Matrix: "+str(best_modelRNNGRU.metrics_names))
self.log.info("----------> Score: "+str(scoreRNNGRU))
self.log.info("----------> Model Params: "+str(best_paramsRNNGRU))
executionTime=time.time() - start
self.log.info('----------> RNN Execution Time: '+str(executionTime)+'\\n')
XSNN = np.expand_dims(self.testX, axis=2)
predictedData = best_modelRNNGRU.predict(XSNN)
if self.scoreParam.lower() == 'mse':
score = mean_squared_error(self.testY,predictedData)
elif self.scoreParam.lower() == 'rmse':
score=mean_squared_error(self.testY,predictedData,squared=False)
elif self.scoreParam.lower() == 'mae':
score=mean_absolute_error(self.testY,predictedData)
elif self.scoreParam.lower() == 'r2':
score=r2_score(self.testY,predictedData)
else:
score = scoreRNNGRU[1]
self.log.info("----------> Testing Score: "+str(score))
scoreRNNGRU[1] = score
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"Recurrent Neural Network (GRU)","FeatureEngineering":"'+str(self.best_feature_model)+'","Score":'+str(scoreRNNGRU[1])+'}'
self.log.info('Status:- |... DL Algorithm applied: Recurrent Neural Network (GRU)')
self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2)))
if "Recurrent Neural Network (LSTM)"in self.modelList:
self.log.info("-------> Model Name: Recurrent Neural Network (LSTM)")
start = time.time()
data = self.modelParams["Recurrent Neural Network (LSTM)"]
p = {"RNNType":["LSTM"],
"numRNNLayers":[int(n) for n in data["numRNNLayers"].split(",")],
"activation":data["activation"].split(","),
"optimizer":data["optimizer"].split(","),
"losses":data["losses"].split(","),
"first_neuron":[int(n) for n in data["first_layer"].split(",")],
"shapes": data["shapes"].split(","),
"hidden_layers":[int(n) for n in data["hidden_layers"].split(",")],
"dropout": [float(n) for n in data["dropout"].split(",")],
"lr": [float(n) for n in data["learning_rate"].split(",")],
"batch_size": [int(n) for n in data["batch_size"].split(",")],
"epochs": [int(n) for n in data["epochs"].split(",")]}
scan_object = talos.Scan(x=X_train,y=y_train,x_val = X_test,y_val = y_test,model = modelObj.RNNRegression,experiment_name='RNNLSTM',params=p,round_limit=self.roundLimit,random_method=self.randomMethod)
matrix_type = 'val_loss'
if self.scoreParam.lower() == 'rmse':
matrix_type = 'val_rmse_m'
elif(self.scoreParam.lower() == 'r2'):
matrix_type = 'val_r_square'
elif(self.scoreParam.lower() == 'mae'):
matrix_type = 'val_mae'
elif(self.scoreParam.lower() == 'mse'):
matrix_type = 'val_mse'
analyze_objectRNNLSTM = talos.Analyze(scan_object)
highValAccRNNLSTM = analyze_objectRNNLSTM.low(matrix_type)
dfRNNLSTM = analyze_objectRNNLSTM.data
newdfRNNLSTM = dfRNNLSTM.loc[dfRNNLSTM[matrix_type] == highValAccRNNLSTM]
best_paramsRNNLSTM["RNNType"] = "GRU"
best_paramsRNNLSTM["numRNNLayers"] = list(newdfRNNLSTM["numRNNLayers"])[0]
best_paramsRNNLSTM["activation"] = list(newdfRNNLSTM["activation"])[0]
best_paramsRNNLSTM["optimizer"] = list(newdfRNNLSTM["optimizer"])[0]
best_paramsRNNLSTM["losses"] = list(newdfRNNLSTM["losses"])[0]
best_paramsRNNLSTM["first_layer"] = list(newdfRNNLSTM["first_neuron"])[0]
best_paramsRNNLSTM["shapes"] = list(newdfRNNLSTM["shapes"])[0]
best_paramsRNNLSTM["hidden_layers"] = list(newdfRNNLSTM["hidden_layers"])[0]
best_paramsRNNLSTM["dropout"] = list(newdfRNNLSTM["dropout"])[0]
best_paramsRNNLSTM["batch_size"] = list(newdfRNNLSTM["batch_size"])[0]
best_paramsRNNLSTM["epochs"] = list(newdfRNNLSTM["epochs"])[0]
best_paramsRNNLSTM["lr"] = list(newdfRNNLSTM["lr"])[0]
best_modelRNNLSTM = scan_object.best_model(metric=matrix_type, asc=True)
loss_matrix = best_paramsRNNLSTM["losses"]
optimizer = best_paramsRNNLSTM["optimizer"]
batchsize = best_paramsRNNLSTM["batch_size"]
if self.scoreParam == 'rmse':
best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=[rmse_m])
elif self.scoreParam == 'r2':
best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=[r_square])
elif self.scoreParam == 'mae':
best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mae'])
else:
best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mse'])
scoreRNNLSTM = best_modelRNNLSTM.evaluate(X1,Y, batch_size=batchsize)
self.log.info("----------> Score Matrix: "+str(best_modelRNNLSTM.metrics_names))
self.log.info("----------> Score: "+str(scoreRNNLSTM))
self.log.info("----------> Model Params: "+str(best_paramsRNNLSTM))
executionTime=time.time() - start
self.log.info('----------> RNN Execution Time: '+str(executionTime)+'\\n')
XSNN = np.expand_dims(self.testX, axis=2)
predictedData = best_modelRNNLSTM.predict(XSNN)
if self.scoreParam.lower() == 'mse':
score = mean_squared_error(self.testY,predictedData)
elif self.scoreParam.lower() == 'rmse':
score=mean_squared_error(self.testY,predictedData,squared=False)
elif self.scoreParam.lower() == 'mae':
score=mean_absolute_error(self.testY,predictedData)
elif self.scoreParam.lower() == 'r2':
score=r2_score(self.testY,predictedData)
else:
score = scoreRNNLSTM[1]
self.log.info("----------> Testing Score: "+str(score))
scoreRNNLSTM[1] = score
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"Recurrent Neural Network (LSTM)","FeatureEngineering":"'+str(self.best_feature_model)+'","Score":'+str(scoreRNNLSTM[1])+'}'
self.log.info('Status:- |... DL Algorithm applied: Recurrent Neural Network (LSTM)')
self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2)))
if "Convolutional Neural Network (1D)"in self.modelList:
self.log.info("-------> Model Name: CNN")
start = time.time()
data = self.modelParams["Convolutional Neural Network (1D)"]
p = {"activation":data["activation"].split(","),
"kernel_size":data["kernel_size"].split(","),
"numConvLayers":[int(n) for n in data["numConvLayers"].split(",")],
"MaxPool":data["activation"].split(","),
"optimizer":data["optimizer"].split(","),
"losses":data["losses"].split(","),
"first_neuron":[int(n) for n in data["first_layer"].split(",")],
"shapes": data["shapes"].split(","),
"hidden_layers":[int(n) for n in data["hidden_layers"].split(",")],
"dropout": [float(n) for n in data["dropout"].split(",")],
"lr": [float(n) for n in data["learning_rate"].split(",")],
"batch_size": [int(n) for n in data["batch_size"].split(",")],
"epochs": [int(n) for n in data["epochs"].split(",")]}
scan_object = talos.Scan(x=X_train,y=y_train, x_val = X_test, y_val = y_test, model = modelObj.CNNRegression,experiment_name='CNN', params=p,round_limit=self.roundLimit,random_method=self.randomMethod)
matrix_type = 'val_loss'
if self.scoreParam.lower() == 'rmse':
matrix_type = 'val_rmse_m'
elif(self.scoreParam.lower() == 'r2'):
matrix_type = 'val_r_square'
elif(self.scoreParam.lower() == 'mae'):
matrix_type = 'val_mae'
elif(self.scoreParam.lower() == 'mse'):
matrix_type = 'val_mse'
analyze_objectCNN = talos.Analyze(scan_object)
highValAccCNN = analyze_objectCNN.low(matrix_type)
dfCNN = analyze_objectCNN.data
newdfCNN = dfCNN.loc[dfCNN[matrix_type] == highValAccCNN]
best_paramsCNN["numConvLayers"] = list(newdfCNN["numConvLayers"])[0]
best_paramsCNN["MaxPool"] = list(newdfCNN["MaxPool"])[0]
best_paramsCNN["activation"] = list(newdfCNN["activation"])[0]
best_paramsCNN["optimizer"] = list(newdfCNN["optimizer"])[0]
best_paramsCNN["losses"] = list(newdfCNN["losses"])[0]
best_paramsCNN["first_layer"] = list(newdfCNN["first_neuron"])[0]
best_paramsCNN["shapes"] = list(newdfCNN["shapes"])[0]
best_paramsCNN["hidden_layers"] = list(newdfCNN["hidden_layers"])[0]
best_paramsCNN["dropout"] = list(newdfCNN["dropout"])[0]
best_paramsCNN["batch_size"] = list(newdfCNN["batch_size"])[0]
best_paramsCNN["epochs"] = list(newdfCNN["epochs"])[0]
best_paramsCNN["lr"] = list(newdfCNN["lr"])[0]
best_modelCNN = scan_object.best_model(metric=matrix_type, asc=True)
loss_matrix = best_paramsCNN["losses"]
optimizer = best_paramsCNN["optimizer"]
batchsize = best_paramsCNN["batch_size"]
if self.scoreParam == 'rmse':
best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[rmse_m])
elif self.scoreParam == 'r2':
best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[r_square])
elif self.scoreParam == 'mae':
best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mae'])
else:
best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['mse'])
scoreCNN = best_modelCNN.evaluate(X1,Y, batch_size=batchsize)
self.log.info("----------> Score Matrix: "+str(best_modelCNN.metrics_names))
self.log.info("----------> Score: "+str(scoreCNN))
self.log.info("----------> Model Params: "+str(best_paramsCNN))
executionTime=time.time() - start
self.log.info('----------> CNN Execution Time: '+str(executionTime)+'\\n')
XSNN = np.expand_dims(self.testX, axis=2)
predictedData = best_modelCNN.predict(XSNN)
if self.scoreParam.lower() == 'mse':
score = mean_squared_error(self.testY,predictedData)
elif self.scoreParam.lower() == 'rmse':
score=mean_squared_error(self.testY,predictedData,squared=False)
elif self.scoreParam.lower() == 'mae':
score=mean_absolute_error(self.testY,predictedData)
elif self.scoreParam.lower() == 'r2':
score=r2_score(self.testY,predictedData)
else:
score = scoreCNN[1]
self.log.info("----------> Testing Score: "+str(score))
scoreCNN[1] = score
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"CNN","FeatureEngineering":"'+str(self.best_feature_model)+'","Score":'+str(scoreCNN[1])+'}'
self.log.info('Status:- |... DL Algorithm applied: CNN')
self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2)))
modelScore = []
if len(scoreSNN) != 0:
modelScore.append(scoreSNN[1])
if len(scoreRNN) != 0:
modelScore.append(scoreRNN[1])
if len(scoreRNNGRU) != 0:
modelScore.append(scoreRNNGRU[1])
if len(scoreRNNLSTM) != 0:
modelScore.append(scoreRNNLSTM[1])
if len(scoreCNN) != 0:
modelScore.append(scoreCNN[1])
selectedModel = ""
best_model = ""
if self.scoreParam == "r2":
if len(scoreSNN) != 0 and max(modelScore) == scoreSNN[1]:
selectedModel = "Neural Network"
best_model = best_modelSNN
best_params = best_paramsSNN
elif len(scoreRNN) != 0 and max(modelScore) == scoreRNN[1]:
selectedModel = "Recurrent Neural Network"
best_model = best_modelRNN
best_params = best_paramsRNN
elif len(scoreRNNGRU) != 0 and max(modelScore) == scoreRNNGRU[1]:
selectedModel = "Recurrent Neural Network (GRU)"
best_model = best_modelRNNGRU
best_params = best_paramsRNNGRU
elif len(scoreRNNLSTM) != 0 and max(modelScore) == scoreRNNLSTM[1]:
selectedModel = "Recurrent Neural Network (LSTM)"
best_model = best_modelRNNLSTM
best_params = best_paramsRNNLSTM
elif len(scoreCNN) != 0 and max(modelScore) == scoreCNN[1]:
selectedModel = "Convolutional Neural Network (1D)"
best_model = best_modelCNN
best_params = best_paramsCNN
modelScore = max(modelScore)
else:
if len(scoreSNN) != 0 and min(modelScore) == scoreSNN[1]:
selectedModel = "Neural Network"
best_model = best_modelSNN
best_params = best_paramsSNN
elif len(scoreRNN) != 0 and min(modelScore) == scoreRNN[1]:
selectedModel = "Recurrent Neural Network"
best_model = best_modelRNN
best_params = best_paramsRNN
elif len(scoreRNNGRU) != 0 and min(modelScore) == scoreRNNGRU[1]:
selectedModel = "Recurrent Neural Network (GRU)"
best_model = best_modelRNNGRU
best_params = best_paramsRNNGRU
elif len(scoreRNNLSTM) != 0 and min(modelScore) == scoreRNNLSTM[1]:
selectedModel = "Recurrent Neural Network (LSTM)"
best_model = best_modelRNNLSTM
best_params = best_paramsRNNLSTM
elif len(scoreCNN) != 0 and min(modelScore) == scoreCNN[1]:
selectedModel = "Convolutional Neural Network (1D)"
best_model = best_modelCNN
best_params = best_paramsCNN
modelScore = min(modelScore)
executionTime=time.time() - lstart
self.log.info("-------> Total Execution Time(sec):"+str(executionTime))
self.log.info('Status:- |... Best Algorithm selected: '+str(selectedModel)+' '+str(round(modelScore,2)))
return selectedModel,modelScore,best_model,best_params,X1,XSNN,scoredetails,loss_matrix,optimizer
except Exception as inst:
self.log.info( '\\n-----> regressionModel failed!!!.'+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import logging
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import LabelBinarizer
from imblearn.over_sampling import RandomOverSampler,SMOTE
from imblearn.under_sampling import RandomUnderSampler
from imblearn.under_sampling import TomekLinks
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error,make_scorer
from sklearn.metrics import mean_squared_error
from sklearn.metrics import log_loss
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.models import load_model
from dlearning.Classification import DLClassificationModel
from dlearning.Regression import DLRegressionModel
from learner.machinelearning import machinelearning
from sklearn.metrics import matthews_corrcoef, brier_score_loss
import os
def recall_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1_m(y_true, y_pred):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
def rmse_m(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
def r_square(y_true, y_pred):
SS_res = K.sum(K.square(y_true-y_pred))
SS_tot = K.sum(K.square(y_true-K.mean(y_true)))
return (1 - SS_res/(SS_tot+K.epsilon()))
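# Note: these Keras-backend metrics are passed to model.compile(metrics=[...]) during training and must be
# re-registered via custom_objects when a saved model is reloaded (see the LoadDL_*_Model helpers below).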
class deeplearning(object):
def __init__(self):
self.log = logging.getLogger('eion')
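# The plain feed-forward network ("Neural Network") consumes 2-D feature arrays, while the RNN/CNN variants expect an extra trailing axis added with np.expand_dims.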
def getDLPredictionData(self,model_dl,hist_reloaded,X):
if model_dl == "Neural Network":
XSNN = X.values
predictedData = hist_reloaded.predict(XSNN)
else:
X1 = np.expand_dims(X, axis=2)
predictedData = hist_reloaded.predict(X1)
return(predictedData)
def getPredictionData(self,model_dl,hist_reloaded,X):
if model_dl == "Neural Network":
XSNN = X.values
#predictedData = hist_reloaded.predict_classes(XSNN)
predict_x=hist_reloaded.predict(XSNN)
predictedData=np.argmax(predict_x,axis=1)
else:
X1 = np.expand_dims(X, axis=2)
#predictedData = hist_reloaded.predict_classes(X1)
predict_x=hist_reloaded.predict(X1)
predictedData=np.argmax(predict_x,axis=1)
return(predictedData, predict_x)
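# Reload a saved regression model; rmse/r2 are custom metrics, so the model is loaded with compile=False and recompiled with the metric re-registered.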
def LoadDL_Regression_Model(self,filename_dl,scoreParam,loss_matrix,optimizer):
if(scoreParam.lower() == 'rmse'):
hist_reloaded = load_model(filename_dl,custom_objects={"rmse": rmse_m},compile=False)
hist_reloaded.compile(loss=loss_matrix,optimizer=optimizer, metrics=[rmse_m])
elif(scoreParam.lower() == 'r2'):
hist_reloaded = load_model(filename_dl,custom_objects={"r2": r_square},compile=False)
hist_reloaded.compile(loss=loss_matrix,optimizer=optimizer, metrics=[r_square])
else:
hist_reloaded = load_model(filename_dl)
return(hist_reloaded)
def startLearning(self,model_type,modelList, modelParams, scoreParam, cvSplit, xtrain,ytrain,xtest,ytest,method,randomMethod,roundLimit,labelMaps,df_test,deployLocation,modelName,modelVersion,best_feature_model):
mlobj = machinelearning()
if model_type == 'Classification':
self.log.info('\\n------ Training DL: Classification ----')
objClf = DLClassificationModel(modelList, modelParams, scoreParam, cvSplit, xtrain,ytrain,xtest,ytest,method,randomMethod,roundLimit,best_feature_model)
dftrain = xtrain.copy()
dftrain['Target'] = ytrain
model_dl,score_dl,best_model_dl,params_dl,X1,XSNN,model_tried_dl,loss_matrix,optimizer = objClf.TalosScan(objClf)
self.log.info('------ Training DL: Classification End----\\n')
saved_model_dl = 'dl_'+modelName+'_'+modelVersion+'.sav'
filename_dl = os.path.join(deployLocation,'model',saved_model_dl)
best_model_dl.save(filename_dl)
hist_reloaded = self.LoadDL_Classification_Model(filename_dl,scoreParam,loss_matrix,optimizer)
self.log.info('\\n--------- Performance Matrix with Train Data ---------')
predictedData, prob = self.getPredictionData(model_dl,hist_reloaded,xtrain)
trainingperformancematrix = mlobj.getClassificationPerformaceMatrix(ytrain, predictedData, prob,labelMaps)
self.log.info('\\n--------- Performance Matrix with Train Data End ---------')
predictedData, prob = self.getPredictionData(model_dl,hist_reloaded,xtest)
df_test['predict'] = predictedData
self.log.info('\\n--------- Performance Matrix with Test Data ---------')
performancematrix = mlobj.getClassificationPerformaceMatrix(ytest, predictedData, prob,labelMaps)
self.log.info('\\n--------- Performance Matrix with Test Data End ---------')
return(model_dl,score_dl,best_model_dl,params_dl,X1,XSNN,model_tried_dl,loss_matrix,optimizer,saved_model_dl,filename_dl,dftrain,df_test,performancematrix,trainingperformancematrix)
else:
objReg = DLRegressionModel(modelList, modelParams, scoreParam, cvSplit, xtrain,ytrain,xtest,ytest,method,randomMethod,roundLimit,best_feature_model)
dftrain = xtrain.copy()
dftrain['Target'] = ytrain
model_dl,score_dl,best_model_dl,params_dl,X1,XSNN,model_tried_dl,loss_matrix,optimizer = objReg.TalosScan(objReg)
self.log.info('------ Training DL: Regression End----\\n')
self.log.info('\\n------- Best DL Model and its parameters -------------')
self.log.info('-------> Best Model: '+str(model_dl))
self.log.info('-------> Best Score: '+str(score_dl))
self.log.info('-------> Best Params: '+str(params_dl))
self.log.info('------- Best DL Model and its parameters End-------------\\n')
saved_model_dl = 'dl_'+modelName+'_'+modelVersion+'.sav'
filename_dl = os.path.join(deployLocation,'model',saved_model_dl)
best_model_dl.save(filename_dl)
hist_reloaded=self.LoadDL_Regression_Model(filename_dl,scoreParam,loss_matrix,optimizer)
predictedData = self.getDLPredictionData(model_dl,hist_reloaded,xtrain)
self.log.info('\\n--------- Performance Matrix with Train Data ---------')
trainingperformancematrix = mlobj.get_regression_matrix(ytrain, predictedData)
self.log.info('--------- Performance Matrix with Train Data End---------\\n')
predictedData = self.getDLPredictionData(model_dl,hist_reloaded,xtest)
df_test['predict'] = predictedData
self.log.info('\\n--------- Performance Matrix with Test Data ---------')
performancematrix = mlobj.get_regression_matrix(ytest, predictedData)
self.log.info('--------- Performance Matrix with Test Data End---------\\n')
return(model_dl,score_dl,best_model_dl,params_dl,X1,XSNN,model_tried_dl,loss_matrix,optimizer,saved_model_dl,filename_dl,dftrain,df_test,performancematrix,trainingperformancematrix)
def LoadDL_Classification_Model(self,filename_dl,scoreParam,loss_matrix,optimizer):
if(scoreParam.lower() == 'recall'):
hist_reloaded = load_model(filename_dl,custom_objects={"recall": recall_m},compile=False)
hist_reloaded.compile(loss=loss_matrix,optimizer=optimizer, metrics=[recall_m])
elif(scoreParam.lower() == 'precision'):
hist_reloaded = load_model(filename_dl,custom_objects={"precision": precision_m},compile=False)
hist_reloaded.compile(loss=loss_matrix,optimizer=optimizer, metrics=[precision_m])
elif(scoreParam.lower() == 'roc_auc'):
hist_reloaded = load_model(filename_dl,compile=False)
hist_reloaded.compile(loss=loss_matrix,optimizer=optimizer, metrics=[tf.keras.metrics.AUC()])
elif(scoreParam.lower() == 'f1_score'):
hist_reloaded = load_model(filename_dl,custom_objects={"f1_score": f1_m},compile=False)
hist_reloaded.compile(loss=loss_matrix,optimizer=optimizer, metrics=[f1_m])
else:
hist_reloaded = load_model(filename_dl)
return(hist_reloaded)
def getClassificationPerformaceMatrix(self,le_trainY,predictedData,prob,labelMaps):
setOfyTrue = set(le_trainY)
unqClassLst = list(setOfyTrue)
if(str(labelMaps) != '{}'):
inv_mapping_dict = {v: k for k, v in labelMaps.items()}
unqClassLst2 = (pd.Series(unqClassLst)).map(inv_mapping_dict)
unqClassLst2 = list(unqClassLst2)
else:
unqClassLst2 = unqClassLst
indexName = []
columnName = []
for item in unqClassLst2:
indexName.append("true:"+str(item))
columnName.append(str(item))
matrixconfusion = pd.DataFrame(confusion_matrix(le_trainY,predictedData, labels = unqClassLst),index = indexName, columns = columnName)
self.log.info('\\n <--- Confusion Matrix --->')
self.log.info(matrixconfusion)
classificationreport = pd.DataFrame(classification_report(le_trainY, predictedData, output_dict=True))
self.log.info('\\n <--- Classification Report --->')
self.log.info(classificationreport)
lb = LabelBinarizer()
lb.fit(le_trainY)
transformTarget= lb.transform(le_trainY)
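# Binary targets: LabelBinarizer returns a single column, so fall back to the raw labels and keep only the positive-class probability column.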
if transformTarget.shape[-1] == 1:
transformTarget = le_trainY
prob = np.delete( prob, 0, 1)
rocaucscore = roc_auc_score(transformTarget,prob,average="macro")
brier_score = None
mcc_score = matthews_corrcoef(le_trainY,predictedData)
if len(unqClassLst) > 2:
brier_score = np.mean(np.sum(np.square(prob - transformTarget), axis=1))
else:
brier_score = brier_score_loss(transformTarget,prob)
self.log.info('-------> ROC AUC SCORE :'+str(rocaucscore))
self.log.info(f'-------> Matthews correlation coefficient SCORE : {mcc_score}')
self.log.info(f'-------> BRIER SCORE : {brier_score}')
matrixconfusion = matrixconfusion.to_json(orient='index')
classificationreport = classificationreport.to_json(orient='index')
matrix = f'"ConfusionMatrix": {matrixconfusion},"ClassificationReport": {classificationreport},"ROC_AUC_SCORE": {rocaucscore},"MCC_SCORE": {mcc_score},"BRIER_SCORE": {brier_score}'
return(matrix)
def split_into_train_test_data(self,featureData,targetData,cvSplit,testPercentage,modelType='classification'):
'''
if cvSplit == None:
'''
testSize=testPercentage/100
if modelType == 'regression':
xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True)
else:
try:
xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,stratify=targetData,test_size=testSize,shuffle=True)
except:
xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True)
self.log.info('\\n<-------------- Test Train Split --------------->\\n')
self.log.info('\\n<-------- Train Data Shape '+str(xtrain.shape)+' ---------->\\n')
self.log.info('\\n<-------- Test Data Shape '+str(xtest.shape)+' ---------->\\n')
'''
else:
xtrain=featureData
ytrain=targetData
xtest=featureData
Conv1D(filters=params['first_neuron'], kernel_size=(3), activation=params['activation'], input_shape=(x_train.shape[1],1),padding='same') )
if params['numConvLayers'] > 1:
for x in range(1,params['numConvLayers']):
if params['MaxPool'] == "True":
model.add(MaxPooling1D(pool_size=2,padding='same'))
model.add(Conv1D(filters=8, kernel_size=3, activation=params['activation'],padding='same'))
talos.utils.hidden_layers(model, params, x_train.shape[1])
model.add(MaxPooling1D(pool_size=2,padding='same'))
model.add(Flatten())
model.add(Dense(y_train.shape[1],activation=params['last_activation']))
model.compile(loss=params['losses'],optimizer=params['optimizer'],metrics=['acc',f1_m,precision_m,recall_m,tf.keras.metrics.AUC()])
out = model.fit(x_train, y_train, validation_data=(x_val, y_val), batch_size=params['batch_size'],
epochs=params['epochs'],verbose=0,shuffle=True)
return out, model
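# Talos expects model-builder functions with the signature f(x_train, y_train, x_val, y_val, params) that return the fit history and the model, as above.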
def TalosScan(self,modelObj):
try:
#dataPath = pd.read_csv(self.dataLocation)
#X = dataPath.drop(self.targetData, axis=1)
loss_matrix='binary_crossentropy'
optimizer='Nadam'
X = self.featuresData
x = X.values
Y = self.targetData
scoredetails = ''
#Y= dataPath[self.targetData]
y = Y.values
y = kutils.to_categorical(y)
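# Targets are one-hot encoded for the Keras classifiers; self.testY is left in label form for the sklearn scoring applied after tuning.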
XSNN = X.values
X1 = np.expand_dims(X, axis=2)
kf = KFold(n_splits = self.cvSplit)
for train_index, test_index in kf.split(X):
X_train, X_test = x[train_index], x[test_index]
y_train, y_test = y[train_index], y[test_index]
data = self.modelParams
models = data.keys()
start = time.time()
scoreSNN = []
scoreRNN = []
scoreCNN = []
scoreRNNGRU = []
scoreRNNLSTM = []
best_paramsSNN = {}
best_paramsRNN = {}
best_paramsRNNGRU = {}
best_paramsRNNLSTM = {}
best_paramsCNN = {}
if "Neural Network"in self.modelList:
self.log.info("-------> Model Name: Neural Network")
start = time.time()
data = self.modelParams["Neural Network"]
p = {"activation":data["activation"].split(","),
"last_activation":data["last_activation"].split(","),
"optimizer":data["optimizer"].split(","),
"losses":data["losses"].split(","),
"first_neuron":[int(n) for n in data["first_layer"].split(",")],
"shapes": data["shapes"].split(","),
"hidden_layers":[int(n) for n in data["hidden_layers"].split(",")],
"dropout": [float(n) for n in data["dropout"].split(",")],
"lr": [float(n) for n in data["learning_rate"].split(",")],
"batch_size": [int(n) for n in data["batch_size"].split(",")],
"epochs": [int(n) for n in data["epochs"].split(",")]
}
# Size of the full hyperparameter grid; cap the Talos round limit so the scan never requests more permutations than exist.
param_combinations = int(np.prod([len(v) for v in p.values()]))
round_limit = self.roundLimit if not self.roundLimit else min(self.roundLimit, param_combinations)
scan_object = talos.Scan(x=X_train,
y=y_train,
x_val = X_test,
y_val = y_test,
model = modelObj.SNNClassification,
experiment_name='SNN',
params=p,
round_limit=round_limit,
random_method=self.randomMethod
)
matrix_type = 'val_acc'
if self.scoreParam.lower() == 'accuracy':
matrix_type = 'val_acc'
elif(self.scoreParam.lower() == 'roc_auc'):
matrix_type = 'val_auc'
elif(self.scoreParam.lower() == 'recall'):
matrix_type = 'val_recall_m'
elif(self.scoreParam.lower() == 'precision'):
matrix_type = 'val_precision_m'
elif(self.scoreParam.lower() == 'f1_score'):
matrix_type = 'val_f1_m'
analyze_objectSNN = talos.Analyze(scan_object)
highValAccSNN = analyze_objectSNN.high(matrix_type)
dfSNN = analyze_objectSNN.data
#pd.set_option('display.max_columns',20)
#print(dfSNN)
#pd.reset_option('display.max_columns')
newdfSNN = dfSNN.loc[dfSNN[matrix_type] == highValAccSNN]
if(len(newdfSNN) > 1):
lowLoss = analyze_objectSNN.low('val_loss')
newdfSNN = newdfSNN.loc[newdfSNN['val_loss'] == lowLoss]
best_paramsSNN["activation"] = list(newdfSNN["activation"])[0]
best_paramsSNN["optimizer"] = list(newdfSNN["optimizer"])[0]
best_paramsSNN["losses"] = list(newdfSNN["losses"])[0]
best_paramsSNN["first_layer"] = list(newdfSNN["first_neuron"])[0]
best_paramsSNN["shapes"] = list(newdfSNN["shapes"])[0]
best_paramsSNN["hidden_layers"] = list(newdfSNN["hidden_layers"])[0]
best_paramsSNN["dropout"] = list(newdfSNN["dropout"])[0]
best_paramsSNN["batch_size"] = list(newdfSNN["batch_size"])[0]
best_paramsSNN["epochs"] = list(newdfSNN["epochs"])[0]
best_paramsSNN["lr"] = list(newdfSNN["lr"])[0]
best_paramsSNN["last_activation"] = list(newdfSNN["last_activation"])[0]
best_modelSNN = scan_object.best_model(metric=matrix_type)
try:
if(len(best_paramsSNN["losses"]) == 0):
loss_matrix = 'binary_crossentropy'
else:
loss_matrix = best_paramsSNN["losses"]
if(len(best_paramsSNN["optimizer"]) == 0):
optimizer = 'Nadam'
else:
optimizer = best_paramsSNN["optimizer"]
if best_paramsSNN["batch_size"] == 0:
batchsize = 32
else:
batchsize = best_paramsSNN["batch_size"]
except:
loss_matrix = 'binary_crossentropy'
optimizer = 'Nadam'
batchsize = 32
if self.scoreParam == 'accuracy':
best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['accuracy'])
elif self.scoreParam == 'roc_auc':
best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[tf.keras.metrics.AUC()])
elif self.scoreParam == 'recall':
best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[recall_m])
elif self.scoreParam == 'precision':
best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[precision_m])
elif self.scoreParam == 'f1_score':
best_modelSNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[f1_m])
scoreSNN = best_modelSNN.evaluate(XSNN,y, batch_size=batchsize)
self.log.info("----------> Score Matrix: "+str(best_modelSNN.metrics_names))
self.log.info("----------> Score: "+str(scoreSNN))
self.log.info("----------> Model Params: "+str(best_paramsSNN))
executionTime=time.time() - start
XSNN = self.testX.values
#predict_x=best_modelSNN.predict(XSNN)
predictedData=np.argmax(best_modelSNN.predict(XSNN),axis=1)
#predictedData = best_modelSNN.predict_classes(XSNN)
#print(predictedData)
#predictedData = best_modelSNN.predict(self.testX)
if 'accuracy' in str(self.scoreParam):
score = accuracy_score(self.testY,predictedData)
elif 'recall' in str(self.scoreParam):
score = recall_score(self.testY,predictedData, average='macro')
elif 'precision' in str(self.scoreParam):
score = precision_score(self.testY,predictedData,average='macro')
elif 'f1_score' in str(self.scoreParam):
score = f1_score(self.testY,predictedData, average='macro')
elif 'roc_auc' in str(self.scoreParam):
score = roc_auc_score(self.testY,predictedData,average="macro")
score = round((score*100),2)
self.log.info("----------> Testing Score: "+str(score))
self.log.info('----------> Total Execution: '+str(executionTime)+'\\n')
scoreSNN[1] = score
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"Neural Network","FeatureEngineering":"'+str(self.best_feature_model)+'","Score":'+str(scoreSNN[1])+'}'
self.log.info('Status:- |... DL Algorithm applied: Neural Network')
self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2)))
if "Recurrent Neural Network"in self.modelList:
self.log.info("-------> Model Name: Recurrent Neural Network")
start = time.time()
data = self.modelParams["Recurrent Neural Network"]
p = {"RNNType":["SimpleRNN"],
"numRNNLayers":[int(n) for n in data["numRNNLayers"].split(",")],
"activation":data["activation"].split(","),
"last_activation":data["last_activation"].split(","),
"optimizer":data["optimizer"].split(","),
"losses":data["losses"].split(","),
"first_neuron":[int(n) for n in data["first_layer"].split(",")],
"shapes": data["shapes"].split(","),
"hidden_layers":[int(n) for n in data["hidden_layers"].split(",")],
"dropout": [float(n) for n in data["dropout"].split(",")],
"lr": [float(n) for n in data["learning_rate"].split(",")],
"batch_size": [int(n) for n in data["batch_size"].split(",")],
"epochs": [int(n) for n in data["epochs"].split(",")]}
param_combinations = int(np.prod([len(v) for v in p.values()]))
round_limit = self.roundLimit if not self.roundLimit else min(self.roundLimit, param_combinations)
scan_object = talos.Scan(x=X_train,
y=y_train,
x_val = X_test,
y_val = y_test,
model = modelObj.RNNClassification,
experiment_name='RNN',
params=p,
round_limit=round_limit,
random_method=self.randomMethod
)
matrix_type = 'val_acc'
if self.scoreParam.lower() == 'accuracy':
matrix_type = 'val_acc'
elif(self.scoreParam.lower() == 'roc_auc'):
matrix_type = 'val_auc'
elif(self.scoreParam.lower() == 'recall'):
matrix_type = 'val_recall_m'
elif(self.scoreParam.lower() == 'precision'):
matrix_type = 'val_precision_m'
elif(self.scoreParam.lower() == 'f1_score'):
matrix_type = 'val_f1_m'
analyze_objectRNN = talos.Analyze(scan_object)
highValAccRNN = analyze_objectRNN.high(matrix_type)
dfRNN = analyze_objectRNN.data
newdfRNN = dfRNN.loc[dfRNN[matrix_type] == highValAccRNN]
if(len(newdfRNN) > 1):
lowLoss = analyze_objectRNN.low('val_loss')
newdfRNN = newdfRNN.loc[newdfRNN['val_loss'] == lowLoss]
best_paramsRNN["RNNType"] = list(newdfRNN["RNNType"])[0]
best_paramsRNN["numRNNLayers"] = list(newdfRNN["numRNNLayers"])[0]
best_paramsRNN["activation"] = list(newdfRNN["activation"])[0]
best_paramsRNN["optimizer"] = list(newdfRNN["optimizer"])[0]
best_paramsRNN["losses"] = list(newdfRNN["losses"])[0]
best_paramsRNN["first_layer"] = list(newdfRNN["first_neuron"])[0]
best_paramsRNN["shapes"] = list(newdfRNN["shapes"])[0]
best_paramsRNN["hidden_layers"] = list(newdfRNN["hidden_layers"])[0]
best_paramsRNN["dropout"] = list(newdfRNN["dropout"])[0]
best_paramsRNN["batch_size"] = list(newdfRNN["batch_size"])[0]
best_paramsRNN["epochs"] = list(newdfRNN["epochs"])[0]
best_paramsRNN["lr"] = list(newdfRNN["lr"])[0]
best_paramsRNN["last_activation"] = list(newdfRNN["last_activation"])[0]
best_modelRNN = scan_object.best_model(metric=matrix_type, asc=False)
try:
if(len(best_paramsRNN["losses"]) == 0):
loss_matrix = 'binary_crossentropy'
else:
loss_matrix = best_paramsRNN["losses"]
if(len(best_paramsRNN["optimizer"]) == 0):
optimizer = 'Nadam'
else:
optimizer = best_paramsRNN["optimizer"]
if(best_paramsRNN["batch_size"] == 0):
batchsize = 32
else:
batchsize = best_paramsRNN["batch_size"]
except:
loss_matrix = 'binary_crossentropy'
optimizer = 'Nadam'
batchsize = 32
if self.scoreParam == 'accuracy':
best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['accuracy'])
elif self.scoreParam == 'recall':
best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[recall_m])
elif self.scoreParam == 'roc_auc':
best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[tf.keras.metrics.AUC()])
elif self.scoreParam == 'precision':
best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[precision_m])
elif self.scoreParam == 'f1_score':
best_modelRNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[f1_m])
self.log.info("----------> Score Matrix: "+str(best_modelRNN.metrics_names))
scoreRNN = best_modelRNN.evaluate(X1,y, batch_size=batchsize)
self.log.info("----------> Score: "+str(scoreRNN))
self.log.info("----------> Model Params: "+str(best_paramsRNN))
executionTime=time.time() - start
self.log.info('----------> Total Execution: '+str(executionTime)+'\\n')
XSNN = np.expand_dims(self.testX, axis=2)
#predictedData = best_modelRNN.predict_classes(XSNN)
predictedData=np.argmax(best_modelRNN.predict(XSNN),axis=1)
#predictedData = best_modelSNN.predict(self.testX)
if 'accuracy' in str(self.scoreParam):
score = accuracy_score(self.testY,predictedData)
elif 'recall' in str(self.scoreParam):
score = recall_score(self.testY,predictedData, average='macro')
elif 'precision' in str(self.scoreParam):
score = precision_score(self.testY,predictedData,average='macro')
elif 'f1_score' in str(self.scoreParam):
score = f1_score(self.testY,predictedData, average='macro')
elif 'roc_auc' in str(self.scoreParam):
score = roc_auc_score(self.testY,predictedData,average="macro")
score = round((score*100),2)
self.log.info("----------> Testing Score: "+str(score))
scoreRNN[1] = score
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"Recurrent Neural Network","FeatureEngineering":"'+str(self.best_feature_model)+'","Score":'+str(scoreRNN[1])+'}'
self.log.info('Status:- |... DL Algorithm applied: Recurrent Neural Network')
self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2)))
if "Recurrent Neural Network (GRU)"in self.modelList:
self.log.info("-------> Model Name: Recurrent Neural Network (GRU)")
start = time.time()
data = self.modelParams["Recurrent Neural Network (GRU)"]
print(data)
p = {"RNNType":["GRU"],
"numRNNLayers":[int(n) for n in data["numRNNLayers"].split(",")],
"activation":data["activation"].split(","),
"last_activation":data["last_activation"].split(","),
"optimizer":data["optimizer"].split(","),
"losses":data["losses"].split(","),
"first_neuron":[int(n) for n in data["first_layer"].split(",")],
"shapes": data["shapes"].split(","),
"hidden_layers":[int(n) for n in data["hidden_layers"].split(",")],
"dropout": [float(n) for n in data["dropout"].split(",")],
"lr": [float(n) for n in data["learning_rate"].split(",")],
"batch_size": [int(n) for n in data["batch_size"].split(",")],
"epochs": [int(n) for n in data["epochs"].split(",")]}
param_combinations = int(np.prod([len(v) for v in p.values()]))
round_limit = self.roundLimit if not self.roundLimit else min(self.roundLimit, param_combinations)
scan_object = talos.Scan(x=X_train,
y=y_train,
x_val = X_test,
y_val = y_test,
model = modelObj.RNNClassification,
experiment_name='RNN',
params=p,
round_limit=round_limit,
random_method=self.randomMethod
)
matrix_type = 'val_acc'
if self.scoreParam.lower() == 'accuracy':
matrix_type = 'val_acc'
elif(self.scoreParam.lower() == 'roc_auc'):
matrix_type = 'val_auc'
elif(self.scoreParam.lower() == 'recall'):
matrix_type = 'val_recall_m'
elif(self.scoreParam.lower() == 'precision'):
matrix_type = 'val_precision_m'
elif(self.scoreParam.lower() == 'f1_score'):
matrix_type = 'val_f1_m'
analyze_objectRNNGRU = talos.Analyze(scan_object)
highValAccRNNGRU = analyze_objectRNNGRU.high(matrix_type)
dfRNNGRU = analyze_objectRNNGRU.data
newdfRNNGRU = dfRNNGRU.loc[dfRNNGRU[matrix_type] == highValAccRNNGRU]
if(len(newdfRNNGRU) > 1):
lowLoss = analyze_objectRNNGRU.low('val_loss')
newdfRNNGRU = newdfRNNGRU.loc[newdfRNNGRU['val_loss'] == lowLoss]
best_paramsRNNGRU["RNNType"] = "GRU"
best_paramsRNNGRU["numRNNLayers"] = list(newdfRNNGRU["numRNNLayers"])[0]
best_paramsRNNGRU["activation"] = list(newdfRNNGRU["activation"])[0]
best_paramsRNNGRU["optimizer"] = list(newdfRNNGRU["optimizer"])[0]
best_paramsRNNGRU["losses"] = list(newdfRNNGRU["losses"])[0]
best_paramsRNNGRU["first_layer"] = list(newdfRNNGRU["first_neuron"])[0]
best_paramsRNNGRU["shapes"] = list(newdfRNNGRU["shapes"])[0]
best_paramsRNNGRU["hidden_layers"] = list(newdfRNNGRU["hidden_layers"])[0]
best_paramsRNNGRU["dropout"] = list(newdfRNNGRU["dropout"])[0]
best_paramsRNNGRU["batch_size"] = list(newdfRNNGRU["batch_size"])[0]
best_paramsRNNGRU["epochs"] = list(newdfRNNGRU["epochs"])[0]
best_paramsRNNGRU["lr"] = list(newdfRNNGRU["lr"])[0]
best_paramsRNNGRU["last_activation"] = list(newdfRNNGRU["last_activation"])[0]
best_modelRNNGRU = scan_object.best_model(metric=matrix_type, asc=False)
try:
if(len(best_paramsRNNGRU["losses"]) == 0):
loss_matrix = 'binary_crossentropy'
else:
loss_matrix = best_paramsRNNGRU["losses"]
if(len(best_paramsRNNGRU["optimizer"]) == 0):
optimizer = 'Nadam'
else:
optimizer = best_paramsRNNGRU["optimizer"]
if(best_paramsRNNGRU["batch_size"]== 0):
batchsize = 32
else:
batchsize = best_paramsRNNGRU["batch_size"]
except:
loss_matrix = 'binary_crossentropy'
optimizer = 'Nadam'
batchsize = 32
if self.scoreParam == 'accuracy':
best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=['accuracy'])
elif self.scoreParam == 'recall':
best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=[recall_m])
elif self.scoreParam == 'roc_auc':
best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=[tf.keras.metrics.AUC()])
elif self.scoreParam == 'precision':
best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=[precision_m])
elif self.scoreParam == 'f1_score':
best_modelRNNGRU.compile(loss=loss_matrix,optimizer=optimizer, metrics=[f1_m])
self.log.info("----------> Score Matrix: "+str(best_modelRNNGRU.metrics_names))
scoreRNNGRU = best_modelRNNGRU.evaluate(X1,y, batch_size=batchsize)
self.log.info("----------> Score: "+str(scoreRNNGRU))
self.log.info("----------> Model Params: "+str(best_paramsRNNGRU))
executionTime=time.time() - start
self.log.info('----------> Total Execution: '+str(executionTime)+'\\n')
XSNN = np.expand_dims(self.testX, axis=2)
#predictedData = best_modelRNNGRU.predict_classes(XSNN)
predictedData=np.argmax(best_modelRNNGRU.predict(XSNN),axis=1)
#predictedData = best_modelSNN.predict(self.testX)
if 'accuracy' in str(self.scoreParam):
score = accuracy_score(self.testY,predictedData)
elif 'recall' in str(self.scoreParam):
score = recall_score(self.testY,predictedData, average='macro')
elif 'precision' in str(self.scoreParam):
score = precision_score(self.testY,predictedData,average='macro')
elif 'f1_score' in str(self.scoreParam):
score = f1_score(self.testY,predictedData, average='macro')
elif 'roc_auc' in str(self.scoreParam):
score = roc_auc_score(self.testY,predictedData,average="macro")
score = round((score*100),2)
self.log.info("----------> Testing Score: "+str(score))
scoreRNNGRU[1] = score
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"Recurrent Neural Network (GRU)","FeatureEngineering":"'+str(self.best_feature_model)+'","Score":'+str(scoreRNNGRU[1])+'}'
self.log.info('Status:- |... DL Algorithm applied: Recurrent Neural Network (GRU)')
self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2)))
if "Recurrent Neural Network (LSTM)"in self.modelList:
self.log.info("-------> Model Name: Recurrent Neural Network (LSTM)")
start = time.time()
data = self.modelParams["Recurrent Neural Network (LSTM)"]
p = {"RNNType":["LSTM"],
"numRNNLayers":[int(n) for n in data["numRNNLayers"].split(",")],
"activation":data["activation"].split(","),
"last_activation":data["last_activation"].split(","),
"optimizer":data["optimizer"].split(","),
"losses":data["losses"].split(","),
"first_neuron":[int(n) for n in data["first_layer"].split(",")],
"shapes": data["shapes"].split(","),
"hidden_layers":[int(n) for n in data["hidden_layers"].split(",")],
"dropout": [float(n) for n in data["dropout"].split(",")],
"lr": [float(n) for n in data["learning_rate"].split(",")],
"batch_size": [int(n) for n in data["batch_size"].split(",")],
"epochs": [int(n) for n in data["epochs"].split(",")]}
param_combinations = int(np.prod([len(v) for v in p.values()]))
round_limit = self.roundLimit if not self.roundLimit else min(self.roundLimit, param_combinations)
scan_object = talos.Scan(x=X_train,
y=y_train,
x_val = X_test,
y_val = y_test,
model = modelObj.RNNClassification,
experiment_name='RNN',
params=p,
round_limit=round_limit,
random_method=self.randomMethod
)
matrix_type = 'val_acc'
if self.scoreParam.lower() == 'accuracy':
matrix_type = 'val_acc'
elif(self.scoreParam.lower() == 'roc_auc'):
matrix_type = 'val_auc'
elif(self.scoreParam.lower() == 'recall'):
matrix_type = 'val_recall_m'
elif(self.scoreParam.lower() == 'precision'):
matrix_type = 'val_precision_m'
elif(self.scoreParam.lower() == 'f1_score'):
matrix_type = 'val_f1_m'
analyze_objectRNNLSTM = talos.Analyze(scan_object)
highValAccRNNLSTM = analyze_objectRNNLSTM.high(matrix_type)
dfRNNLSTM = analyze_objectRNNLSTM.data
newdfRNNLSTM = dfRNNLSTM.loc[dfRNNLSTM[matrix_type] == highValAccRNNLSTM]
if(len(newdfRNNLSTM) > 1):
lowLoss = analyze_objectRNNLSTM.low('val_loss')
newdfRNNLSTM = newdfRNNLSTM.loc[newdfRNNLSTM['val_loss'] == lowLoss]
best_paramsRNNLSTM["RNNType"] = "LSTM"
best_paramsRNNLSTM["numRNNLayers"] = list(newdfRNNLSTM["numRNNLayers"])[0]
best_paramsRNNLSTM["activation"] = list(newdfRNNLSTM["activation"])[0]
best_paramsRNNLSTM["optimizer"] = list(newdfRNNLSTM["optimizer"])[0]
best_paramsRNNLSTM["losses"] = list(newdfRNNLSTM["losses"])[0]
best_paramsRNNLSTM["first_layer"] = list(newdfRNNLSTM["first_neuron"])[0]
best_paramsRNNLSTM["shapes"] = list(newdfRNNLSTM["shapes"])[0]
best_paramsRNNLSTM["hidden_layers"] = list(newdfRNNLSTM["hidden_layers"])[0]
best_paramsRNNLSTM["dropout"] = list(newdfRNNLSTM["dropout"])[0]
best_paramsRNNLSTM["batch_size"] = list(newdfRNNLSTM["batch_size"])[0]
best_paramsRNNLSTM["epochs"] = list(newdfRNNLSTM["epochs"])[0]
best_paramsRNNLSTM["lr"] = list(newdfRNNLSTM["lr"])[0]
best_paramsRNNLSTM["last_activation"] = list(newdfRNNLSTM["last_activation"])[0]
best_modelRNNLSTM = scan_object.best_model(metric=matrix_type, asc=False)
try:
if(len(best_paramsRNNLSTM["losses"]) == 0):
loss_matrix = 'binary_crossentropy'
else:
loss_matrix = best_paramsRNNLSTM["losses"]
if(len(best_paramsRNNLSTM["optimizer"]) == 0):
optimizer = 'Nadam'
else:
optimizer = best_paramsRNNLSTM["optimizer"]
if(best_paramsRNNLSTM["batch_size"] == 0):
batchsize = 32
else:
batchsize = best_paramsRNNLSTM["batch_size"]
except:
loss_matrix = 'binary_crossentropy'
optimizer = 'Nadam'
batchsize = 32
if self.scoreParam == 'accuracy':
best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=['accuracy'])
elif self.scoreParam == 'recall':
best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=[recall_m])
elif self.scoreParam == 'roc_auc':
best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=[tf.keras.metrics.AUC()])
elif self.scoreParam == 'precision':
best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=[precision_m])
elif self.scoreParam == 'f1_score':
best_modelRNNLSTM.compile(loss=loss_matrix,optimizer=optimizer, metrics=[f1_m])
self.log.info("----------> Score Matrix: "+str(best_modelRNNLSTM.metrics_names))
scoreRNNLSTM = best_modelRNNLSTM.evaluate(X1,y, batch_size=batchsize)
self.log.info("----------> Score: "+str(scoreRNNLSTM))
self.log.info("----------> Model Params: "+str(best_paramsRNNLSTM))
executionTime=time.time() - start
self.log.info('----------> Total Execution: '+str(executionTime)+'\\n')
XSNN = np.expand_dims(self.testX, axis=2)
#predictedData = best_modelRNNLSTM.predict_classes(XSNN)
predictedData=np.argmax(best_modelRNNLSTM.predict(XSNN),axis=1)
#predictedData = best_modelSNN.predict(self.testX)
if 'accuracy' in str(self.scoreParam):
score = accuracy_score(self.testY,predictedData)
elif 'recall' in str(self.scoreParam):
score = recall_score(self.testY,predictedData, average='macro')
elif 'precision' in str(self.scoreParam):
score = precision_score(self.testY,predictedData,average='macro')
elif 'f1_score' in str(self.scoreParam):
score = f1_score(self.testY,predictedData, average='macro')
elif 'roc_auc' in str(self.scoreParam):
score = roc_auc_score(self.testY,predictedData,average="macro")
score = round((score*100),2)
self.log.info("----------> Testing Score: "+str(score))
scoreRNNLSTM[1] = score
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"Recurrent Neural Network (LSTM)","FeatureEngineering":"'+str(self.best_feature_model)+'","Score":'+str(scoreRNNLSTM[1])+'}'
self.log.info('Status:- |... DL Algorithm applied: Recurrent Neural Network (LSTM)')
self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2)))
if "Convolutional Neural Network (1D)"in self.modelList:
self.log.info("-------> Model Name: CNN")
start = time.time()
data = self.modelParams["Convolutional Neural Network (1D)"]
p = {"activation":data["activation"].split(","),
"last_activation":data["last_activation"].split(","),
"numConvLayers":[int(n) for n in data["numConvLayers"].split(",")],
"MaxPool":data["activation"].split(","),
"optimizer":data["optimizer"].split(","),
"losses":data["losses"].split(","),
"first_neuron":[int(n) for n in data["first_layer"].split(",")],
"shapes": data["shapes"].split(","),
"hidden_layers":[int(n) for n in data["hidden_layers"].split(",")],
"dropout": [float(n) for n in data["dropout"].split(",")],
"lr": [float(n) for n in data["learning_rate"].split(",")],
"batch_size": [int(n) for n in data["batch_size"].split(",")],
"epochs": [int(n) for n in data["epochs"].split(",")]}
param_combinations = int(np.prod([len(v) for v in p.values()]))
round_limit = self.roundLimit if not self.roundLimit else min(self.roundLimit, param_combinations)
scan_object = talos.Scan(x=X_train,
y=y_train,
x_val = X_test,
y_val = y_test,
model = modelObj.CNNClassification,
experiment_name='CNN',
params=p,
round_limit=round_limit,
random_method=self.randomMethod
)
matrix_type = 'val_acc'
if self.scoreParam.lower() == 'accuracy':
matrix_type = 'val_acc'
elif(self.scoreParam.lower() == 'roc_auc'):
matrix_type = 'val_auc'
elif(self.scoreParam.lower() == 'recall'):
matrix_type = 'val_recall_m'
elif(self.scoreParam.lower() == 'precision'):
matrix_type = 'val_precision_m'
elif(self.scoreParam.lower() == 'f1_score'):
matrix_type = 'val_f1_m'
analyze_objectCNN = talos.Analyze(scan_object)
highValAccCNN = analyze_objectCNN.high(matrix_type)
dfCNN = analyze_objectCNN.data
newdfCNN = dfCNN.loc[dfCNN[matrix_type] == highValAccCNN]
if(len(newdfCNN) > 1):
lowLoss = analyze_objectCNN.low('val_loss')
newdfCNN = newdfCNN.loc[newdfCNN['val_loss'] == lowLoss]
best_paramsCNN["numConvLayers"] = list(newdfCNN["numConvLayers"])
best_paramsCNN["MaxPool"] = list(newdfCNN["MaxPool"])
best_paramsCNN["activation"] = list(newdfCNN["activation"])
best_paramsCNN["optimizer"] = list(newdfCNN["optimizer"])
best_paramsCNN["losses"] = list(newdfCNN["losses"])
best_paramsCNN["first_layer"] = list(newdfCNN["first_neuron"])
best_paramsCNN["shapes"] = list(newdfCNN["shapes"])
best_paramsCNN["hidden_layers"] = list(newdfCNN["hidden_layers"])
best_paramsCNN["dropout"] = list(newdfCNN["dropout"])
best_paramsCNN["batch_size"] = list(newdfCNN["batch_size"])
best_paramsCNN["epochs"] = list(newdfCNN["epochs"])
best_paramsCNN["lr"] = list(newdfCNN["lr"])
best_paramsCNN["last_activation"] = list(newdfCNN["last_activation"])[0]
best_modelCNN = scan_object.best_model(metric='val_acc', asc=False)
try:
if(len(best_paramsCNN["losses"]) == 0):
loss_matrix = 'binary_crossentropy'
else:
loss_matrix = best_paramsCNN["losses"][0]
if(len(best_paramsCNN["optimizer"]) == 0):
optimizer = 'Nadam'
else:
optimizer = best_paramsCNN["optimizer"][0]
if(best_paramsCNN["batch_size"] == 0):
batchsize = 32
else:
batchsize = best_paramsCNN["batch_size"][0]
except:
loss_matrix = 'binary_crossentropy'
optimizer = 'Nadam'
batchsize = 32
if self.scoreParam == 'accuracy':
best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=['accuracy'])
elif self.scoreParam == 'recall':
best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[recall_m])
elif self.scoreParam == 'precision':
best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[precision_m])
elif self.scoreParam == 'roc_auc':
best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[tf.keras.metrics.AUC()])
elif self.scoreParam == 'f1_score':
best_modelCNN.compile(loss=loss_matrix,optimizer=optimizer, metrics=[f1_m])
self.log.info("----------> Score Matrix: "+str(best_modelCNN.metrics_names))
scoreCNN = best_modelCNN.evaluate(X1,y, batch_size=batchsize)
self.log.info("----------> Score: "+str(scoreCNN))
self.log.info("----------> Model Params: "+str(best_paramsCNN))
executionTime=time.time() - start
self.log.info('----------> Total Execution: '+str(executionTime)+'\\n')
XSNN = np.expand_dims(self.testX, axis=2)
#predictedData = best_modelCNN.predict_classes(XSNN)
predictedData=np.argmax(best_modelCNN.predict(XSNN),axis=1)
#predictedData = best_modelSNN.predict(self.testX)
if 'accuracy' in str(self.scoreParam):
score = accuracy_score(self.testY,predictedData)
elif 'recall' in str(self.scoreParam):
score = recall_score(self.testY,predictedData, average='macro')
elif 'precision' in str(self.scoreParam):
score = precision_score(self.testY,predictedData,average='macro')
elif 'f1_score' in str(self.scoreParam):
score = f1_score(self.testY,predictedData, average='macro')
elif 'roc_auc' in str(self.scoreParam):
score = roc_auc_score(self.testY,predictedData,average="macro")
score = round((score*100),2)
self.log.info("----------> Testing Score: "+str(score))
scoreCNN[1] = score
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"Convolutional Neural Network (1D)","FeatureEngineering":"'+str(self.best_feature_model)+'","Score":'+str(scoreCNN[1])+'}'
self.log.info('Status:- |... DL Algorithm applied: Convolutional Neural Network (1D)')
self.log.info('Status:- |... Score after hyperparameter tuning: '+str(round(score,2)))
modelScore = []
if len(scoreSNN) != 0:
modelScore.append(scoreSNN[1])
if len(scoreRNN) != 0:
modelScore.append(scoreRNN[1])
if len(scoreRNNGRU) != 0:
modelScore.append(scoreRNNGRU[1])
if len(scoreRNNLSTM) != 0:
modelScore.append(scoreRNNLSTM[1])
if len(scoreCNN) != 0:
modelScore.append(scoreCNN[1])
selectedModel = ""
best_params=""
if len(scoreSNN) != 0 and max(modelScore) == scoreSNN[1]:
selectedModel = "Neural Network"
best_model = best_modelSNN
best_params = best_paramsSNN
elif len(scoreRNN) != 0 and max(modelScore) == scoreRNN[1]:
selectedModel = "Recurrent Neural Network"
best_model = best_modelRNN
best_params = best_paramsRNN
elif len(scoreRNNGRU) != 0 and max(modelScore) == scoreRNNGRU[1]:
selectedModel = "Recurrent Neural Network (GRU)"
best_model = best_modelRNNGRU
best_params = best_paramsRNNGRU
elif len(scoreRNNLSTM) != 0 and max(modelScore) == scoreRNNLSTM[1]:
selectedModel = "Recurrent Neural Network (LSTM)"
best_model = best_modelRNNLSTM
best_params = best_paramsRNNLSTM
elif len(scoreCNN) != 0 and max(modelScore) == scoreCNN[1]:
selectedModel = "Convolutional Neural Network (1D)"
best_model = best_modelCNN
best_params = best_paramsCNN
modelScore = max(modelScore)
executionTime=time.time() - start
self.log.info("-------> ExecutionTime(sec) :"+str(executionTime)+'\\n')
self.log.info('Status:- |... Best Algorithm selected: '+str(selectedModel)+' '+str(round(modelScore,2)))
self.log.info('-------> Best Params: '+str(best_params))
return selectedModel,modelScore,best_model,best_params,X1,XSNN,scoredetails,loss_matrix,optimizer
except Exception as inst:
self.log.info( '\\n-----> classificationModel failed!!!.'+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pandas as pd
import numpy as np
from mlxtend.frequent_patterns import apriori, association_rules
from mlxtend.preprocessing import TransactionEncoder
import matplotlib.pyplot as plt
import json
import logging
import os,sys
def hot_encode(x):
if(int(x)<= 0):
return 0
if(int(x)>= 1):
return 1
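# Illustrative note (comments added for clarity): apply_associationRules() below pivots the raw
# transactions into an invoice x item basket matrix and hot_encode() flattens the item counts to
# 0/1 flags, which is the input layout expected by mlxtend's apriori(), e.g.
#   itemFeature   bread  milk
#   invoiceNo
#   1001              1     0
#   1002              1     1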
class associationrules:
def __init__(self,dataframe,association_rule_conf,modelparam,invoiceNoFeature,itemFeature):
self.minSupport = modelparam['minSupport']
self.metric = modelparam['metric']
self.minThreshold = modelparam['minThreshold']
self.data = dataframe
self.invoiceNoFeature = invoiceNoFeature
self.itemFeature = itemFeature
self.log = logging.getLogger('eion')
def apply_associationRules(self,outputLocation):
self.data= self.data[[self.itemFeature,self.invoiceNoFeature]]
self.data[self.itemFeature] = self.data[self.itemFeature].str.strip()
self.data.dropna(axis = 0, subset =[self.invoiceNoFeature], inplace = True)
self.data[self.invoiceNoFeature] = self.data[self.invoiceNoFeature].astype('str')
self.data = self.data.groupby([self.invoiceNoFeature,self.itemFeature]).size()
self.data=self.data.unstack().reset_index().fillna('0').set_index(self.invoiceNoFeature)
self.data = self.data.applymap(hot_encode)
ohe_df = self.data
'''
print(self.data)
sys.exit()
items = []
for col in list(self.data):
ucols = self.data[col].dropna().unique()
#print('ucols :',ucols)
if len(ucols) > 0:
items = items + list(set(ucols) - set(items))
#items = self.data.apply(lambda col: col.unique())
#print(items)
#items = (self.data[self.masterColumn].unique())
#print(items)
self.log.info("-------> Total Unique Items: "+str(len(items)))
encoded_vals = []
for index, row in self.data.iterrows():
labels = {}
uncommons = list(set(items) - set(row))
commons = list(set(items).intersection(row))
for uc in uncommons:
labels[uc] = 0
for com in commons:
labels[com] = 1
encoded_vals.append(labels)
ohe_df = pd.DataFrame(encoded_vals)
#print(ohe_df)
'''
freq_items = apriori(ohe_df, min_support=self.minSupport, use_colnames=True)
self.log.info('Status:- |... AssociationRule Algorithm applied: Apriori')
if not freq_items.empty:
self.log.info("\\n------------ Frequent Item Set --------------- ")
self.log.info(freq_items)
save_freq_items = pd.DataFrame()
save_freq_items["itemsets"] = freq_items["itemsets"].apply(lambda x: ', '.join(list(x))).astype("unicode")
outputfile = os.path.join(outputLocation,'frequentItems.csv')
save_freq_items.to_csv(outputfile)
self.log.info('-------> FrequentItems File Name:'+outputfile)
rules = association_rules(freq_items, metric=self.metric, min_threshold=self.minThreshold)
if not rules.empty:
#rules = rules.sort_values(['confidence', 'lift'], ascending =[False, False])
self.log.info("\\n------------ Rules --------------- ")
for index, row in rules.iterrows():
self.log.info("------->Rule: "+ str(row['antecedents']) + " |
-> " + str(row['consequents']))
self.log.info("---------->Support: "+ str(row['support']))
self.log.info("---------->Confidence: "+ str(row['confidence']))
self.log.info("---------->Lift: "+ str(row['lift']))
#rules['antecedents'] = lis
try:
start_time = time.time()
objConvUtility=model_converter(model_path,output_path,input_format,output_format,input_shape)
objConvUtility.convert()
end_time = time.time()
log.info(f"Time required for conversion: {end_time - start_time} sec")
log.info(f'\\nConverting {input_format} to {output_format} Successful')
output['Convert'] = "Success"
except Exception as e:
output['Convert'] = "Error"
log.info('Error: ' + str(e))
log.error(e, exc_info=True)
if 'not supported' in str(e):
output['sub error'] = "Not supported"
output = json.dumps(output)
log.info(f'Output: {output}')
return output
def convert(config_file):
with open(config_file, 'r') as f:
config = json.load(f)
model_path = config['advance']['aionConversionUtility']['modelpath']
output_path = config['advance']['aionConversionUtility']['deployedlocation']
input_format = get_true_option(config['advance']['aionConversionUtility']['inputModelType'],'').lower()
output_format = get_true_option(config['advance']['aionConversionUtility']['outputModelType'],'').lower()
if input_format=="keras":
input_shape = int(config['advance']['aionConversionUtility']['inputShape'])
if input_format!="keras":
input_shape = config['advance']['aionConversionUtility']['numberoffeatures']
input_shape = int(input_shape) if input_shape else 0
#input_shape = int(config['advance']['aionConversionUtility']['numberoffeatures'])
output = run(model_path, output_path, input_format, output_format, input_shape)
print(output)<s>
class aionRunTimeUtility:
# def __init__(self):
# print("AI.ON ConversionUtility function init...")
def executeOnRuntime(self,inputModelName,inputDataSet):
# print("AI.ON ConversionUtility function starts...")
RuntimeType = inputModelName.rsplit('.', 1)[1]
inputDataType = inputDataSet.rsplit('.', 1)[1]
if((RuntimeType == 'ONNX' or RuntimeType == 'onnx') and (inputDataType.lower()=='json')):
# print("Inference through ONNX Runtime started [ML]")
import pandas
import json
with open(inputDataSet) as datafile:
data = json.load(datafile)
dataframe = pandas.DataFrame(data,index=[0])
import numpy
import onnxruntime as rt
sess = rt.InferenceSession(inputModelName)
input_name = sess.get_inputs()[0].name
label_name = sess.get_outputs()[0].name
inputsize=sess.get_inputs()[0].shape
first_n_column = dataframe.iloc[: , :inputsize[1]]
dataset = first_n_column.values
if(inputsize[1]!=len(dataframe.columns)):
print("Error : Input Data size does not match")
return 0
pred_onx = sess.run([label_name], {input_name: dataset.astype(numpy.float32)[0:1]})[0]
# for i in range(0, 1):
#print("ONNX Runtime Prediction [csv]: ",pred_onx)
output = numpy.squeeze(pred_onx)
predictions = numpy.squeeze(output)
prediction = numpy.argmax(predictions)
return(prediction)
# print("Inference through ONNX modelcompleted ")
if((RuntimeType == 'ONNX' or RuntimeType == 'onnx') and (inputDataType!='json')):
import numpy as np
import onnxruntime as rt
from tensorflow.keras.preprocessing import image
sess = rt.InferenceSession(inputModelName)
input_name = sess.get_inputs()[0].name
label_name = sess.get_outputs()[0].name
inputsize=sess.get_inputs()[0].shape
img = image.load_img(inputDataSet, target_size=(inputsize[1], inputsize[2]))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
pred_onx = sess.run([label_name], {input_name: x.astype(np.float32)[0:1]})[0]
output = np.squeeze(pred_onx)
predictions = np.squeeze(output)
return(pred_onx)
if((RuntimeType == 'TFLITE' or RuntimeType == 'tflite')and (inputDataType=='json')):
import numpy as np
import tensorflow as tf
import pandas
from numpy import asarray
interpreter = tf.lite.Interpreter(model_path=inputModelName)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_shape = input_details[0]['shape']
import pandas
import json
with open(inputDataSet) as datafile:
data = json.load(datafile)
dataframe = pandas.DataFrame(data,index=[0])
dataset = dataframe.values
XYZ = dataset[:,0:input_shape[1]].astype(float)
input_data = asarray(XYZ[0]).reshape((1, input_shape[1]))
for i in range(0, 1):
input_data = asarray(XYZ[i]).reshape((1,input_shape[1]))
interpreter.set_tensor(input_details[0]['index'], input_data.astype(np.float32)[0:1])
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
predictions = np.squeeze(output_data)
prediction = np.argmax(predictions)
return(prediction)
if((RuntimeType == 'TFLITE' or RuntimeType == 'tflite') and (inputDataType!='json')):
import numpy as np
from tensorflow.keras.preprocessing import image
import os
import tensorflow as tf
import pandas
from numpy import asarray
interpreter = tf.lite.Interpreter(model_path=inputModelName)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_shape = input_details[0]['shape']
img = image.load_img(inputDataSet, target_size=(input_shape[1], input_shape[2]))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
interpreter.set_tensor(input_details[0]['index'], x.astype(np.float32)[0:1])
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
predictions = np.squeeze(output_data)
prediction = np.argmax(predictions)
return(prediction)
def runTimeTesting(inputModelName,inputDataSet):
objRunTimeUtility=aionRunTimeUtility()
return(objRunTimeUtility.executeOnRuntime(inputModelName,inputDataSet))
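# Hedged usage sketch (not part of the original module; the file names below are placeholders).
# Run this module directly with a converted model and a matching input to exercise the runtime:
#   python aion_runtime_utility.py model.onnx sample_record.json
if __name__ == "__main__":
    import sys
    print("prediction:", runTimeTesting(sys.argv[1], sys.argv[2]))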
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> import pandas
import numpy
import sys
import onnxruntime as rt
def onnx_runtime_validation(modelfile,datafile):
dataframe = pandas.read_csv(datafile)
df = dataframe.head(8)
dataset = df.values
sess = rt.InferenceSession(modelfile)
input_name = sess.get_inputs()[0].name
label_name = sess.get_outputs()[0].name
inputsize=sess.get_inputs()[0].shape
XYZ = dataset[:,0:inputsize[1]].astype(float)
pred_onx = sess.run([label_name], {input_name: XYZ.astype(numpy.float32)[0:8]})[0]
print("Prediction of AION generated/converted model on ONNX runtime for 8 sets of data")
predictions = []
for i in range(0, 8):
    output = numpy.squeeze(pred_onx[i])
    # collect one prediction per row instead of overwriting the whole column on every iteration
    predictions.append(numpy.squeeze(output))
df['predictions'] = predictions
result = df.to_json(orient="records")
return(result)
if __name__ == "__main__":
output = onnx_runtime_validation(sys.argv[1],sys.argv[2])
print("predictions:",output)
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import numpy as np
import pandas as pd
from nltk.tokenize import word_tokenize
# Private function
def unitvec(vec):
return vec / np.linalg.norm(vec)
def __word_average(vectors, sent, vector_size,key_to_index):
"""
Compute average word vector for a single doc/sentence.
"""
try:
mean = []
for word in sent:
index = key_to_index.get( word, None)
if index != None:
mean.append( vectors[index] )
if len(mean):
return unitvec(np.array(mean).mean(axis=0))
return np.zeros(vector_size)
except:
raise
# Private function
def __word_average_list(vectors, docs, embed_size,key_to_index):
"""
Compute average word vector for multiple docs, where docs had been tokenized.
"""
try:
return np.vstack([__word_average(vectors, sent, embed_size,key_to_index) for sent in docs])
except:
raise
def load_pretrained(path):
df = pd.read_csv(path, index_col=0,sep=' ',quotechar = ' ' , header=None, skiprows=1,encoding_errors= 'replace')
return len(df.columns), df
def get_model( df:pd.DataFrame):
index_to_key = {k:v for k,v in enumerate(df.index)}
key_to_index = {v:k for k,v in enumerate(df.index)}
df = df.to_numpy()
return df, index_to_key, key_to_index
def extractFeatureUsingPreTrainedModel(inputCorpus, pretrainedModelPath=None, loaded_model=False,key_to_index={}, embed_size=300):
"""
Extract feature vectors from the input corpus using a pretrained vector model (word2vec, fasttext, or glove converted to word2vec format).
"""
try:
if inputCorpus is None:
return None
else:
if not pretrainedModelPath and ((isinstance(loaded_model, pd.DataFrame) and loaded_model.empty) or (not isinstance(loaded_model, pd.DataFrame) and not loaded_model)):
inputCorpusWordVectors = None
else:
if (isinstance(loaded_model, pd.DataFrame) and not loaded_model.empty) or loaded_model:
pretrainedModel = loaded_model
else:
embed_size, pretrainedModel = load_pretrained(pretrainedModelPath)
pretrainedModel, index_to_key,key_to_index = get_model( pretrainedModel)
if len(pretrainedModel):
input_docs_tokens_list = [word_tokenize(inputDoc) for inputDoc in inputCorpus]
inputCorpusWordVectors = __word_average_list(pretrainedModel, input_docs_tokens_list,embed_size,key_to_index)
else:
inputCorpusWordVectors = None
return inputCorpusWordVectors
except:
raise
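# Hedged usage sketch (not part of the original module). Assumes a word2vec-format vector file
# such as 'glove.6B.50d.w2vformat.txt' is available locally and the nltk 'punkt' tokenizer data
# has been downloaded; both are external prerequisites.
if __name__ == "__main__":
    docs = ["aion extracts document features", "word vectors are averaged per document"]
    features = extractFeatureUsingPreTrainedModel(docs, pretrainedModelPath='glove.6B.50d.w2vformat.txt')
    print(None if features is None else features.shape)   # expected shape: (2, embedding size)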
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))
#from .eda import ExploreTextData <s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pandas as pd
import logging
import numpy as np
import sys
from pathlib import Path
import nltk
from nltk.tokenize import sent_tokenize
from nltk import pos_tag
from nltk import ngrams
from nltk.corpus import wordnet
from nltk import RegexpParser
from textblob import TextBlob
from gensim.summarization import keywords  # used by KeywordsExtraction below (requires gensim < 4.0)
import spacy
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.base import BaseEstimator, TransformerMixin
|
import urllib.request
import zipfile
import os
from os.path import expanduser
import platform
from text import TextCleaning as text_cleaner
from text.Embedding import extractFeatureUsingPreTrainedModel
logEnabled = False
spacy_nlp = None
def ExtractFeatureCountVectors(ngram_range=(1, 1),
max_df=1.0,
min_df=1,
max_features=None,
binary=False):
vectorizer = CountVectorizer(ngram_range = ngram_range, max_df = max_df, \\
min_df = min_df, max_features = max_features, binary = binary)
return vectorizer
def ExtractFeatureTfIdfVectors(ngram_range=(1, 1),
max_df=1.0,
min_df=1,
max_features=None,
binary=False,
norm='l2',
use_idf=True,
smooth_idf=True,
sublinear_tf=False):
vectorizer = TfidfVectorizer(ngram_range = ngram_range, max_df = max_df, \\
min_df = min_df, max_features = max_features, \\
binary = binary, norm = norm, use_idf = use_idf, \\
smooth_idf = smooth_idf, sublinear_tf = sublinear_tf)
return vectorizer
def GetPOSTags( inputText, getPOSTags_Lib='nltk'):
global spacy_nlp
tokens_postag_list = []
if (inputText == ""):
__Log("debug", "{} function: Input text is not provided".format(sys._getframe().f_code.co_name))
else:
if getPOSTags_Lib == 'spacy':
if spacy_nlp == None:
spacy_nlp = spacy.load('en_core_web_sm')
doc = spacy_nlp(inputText)
for token in doc:
tokens_postag_list.append((token.text, token.tag_))
elif getPOSTags_Lib == 'textblob':
doc = TextBlob(inputText)
tokens_postag_list = doc.tags
else:
tokensList = WordTokenize(inputText)
tokens_postag_list = pos_tag(tokensList)
return tokens_postag_list
def GetNGrams( inputText, ngramRange=(1,1)):
ngramslist = []
for n in range(ngramRange[0],ngramRange[1]+1):
nwordgrams = ngrams(inputText.split(), n)
ngramslist.extend([' '.join(grams) for grams in nwordgrams])
return ngramslist
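# Worked example (comment only):
#   GetNGrams("the quick brown fox", (1, 2))
#   -> ['the', 'quick', 'brown', 'fox', 'the quick', 'quick brown', 'brown fox']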
def NamedEntityRecognition( inputText):
global spacy_nlp
neResultList = []
if (inputText == ""):
__Log("debug", "{} function: Input text is not provided".format(sys._getframe().f_code.co_name))
else:
if spacy_nlp == None:
spacy_nlp = spacy.load('en_core_web_sm')
doc = spacy_nlp(inputText)
neResultList = [(X.text, X.label_) for X in doc.ents]
return neResultList
def KeywordsExtraction( inputText, ratio=0.2, words = None, scores=False, pos_filter=('NN', 'JJ'), lemmatize=False):
keywordsList = []
if (inputText == ""):
__Log("debug", "{} function: Input text is not provided".format(sys._getframe().f_code.co_name))
else:
keywordsList = keywords(inputText, ratio = ratio, words = words, split=True, scores=scores,
pos_filter=pos_filter, lemmatize=lemmatize)
return keywordsList
def __get_nodes(parent):
nounList = []
verbList = []
for node in parent:
if type(node) is nltk.Tree:
if node.label() == "NP":
subList = []
for item in node.leaves():
subList.append(item[0])
nounList.append((" ".join(subList)))
elif node.label() == "VP":
subList = []
for item in node.leaves():
subList.append(item[0])
verbList.append((" ".join(subList)))
#verbList.append(node.leaves()[0][0])
__get_nodes(node)
result = {'NP': nounList, 'VP': verbList}
return result
def ShallowParsing( inputText, lib='spacy'):
tags = GetPOSTags(inputText, getPOSTags_Lib=lib)
chunk_regex = r"""
NBAR:
{<DT>?<NN.*|JJ.*>*<NN.*>+} # Nouns and Adjectives, terminated with Nouns
VBAR:
{<RB.?>*<VB.?>*<TO>?<JJ>*<VB.?>+<VB>?} # Verbs and Verb Phrases
NP:
{<NBAR>}
{<NBAR><IN><NBAR>} # Above, connected with in/of/etc...
VP:
{<VBAR>}
{<VBAR><IN><VBAR>} # Above, connected with in/of/etc...
"""
rp = RegexpParser(chunk_regex)
t = rp.parse(tags)
return __get_nodes(t)
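# Worked example (comment only; the exact chunks depend on the POS tagger in use):
#   ShallowParsing("the quick brown fox jumped over the lazy dog")
#   typically yields {'NP': ['the quick brown fox', 'the lazy dog'], 'VP': ['jumped']}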
def SyntacticAndEntityParsing(inputCorpus,
featuresList=['POSTags','NGrams','NamedEntityRecognition','KeywordsExtraction','ShallowParsing'],
posTagsLib='nltk',
ngramRange=(1,1),
ke_ratio=0.2,
ke_words = None,
ke_scores=False,
ke_pos_filter=('NN', 'JJ'),
ke_lemmatize=False):
columnsList = ['Input']
columnsList.extend(featuresList)
df = pd.DataFrame(columns=columnsList)
df['Input'] = inputCorpus
for feature in featuresList:
if feature == 'POSTags':
df[feature] = inputCorpus.apply(lambda x: GetPOSTags(x, posTagsLib))
if feature == 'NGrams':
df[feature] = inputCorpus.apply(lambda x: GetNGrams(x, ngramRange))
if feature == 'NamedEntityRecognition':
df[feature] = inputCorpus.apply(lambda x: NamedEntityRecognition(x))
if feature == 'KeywordsExtraction':
df[feature] = inputCorpus.apply(lambda x: KeywordsExtraction(x,
ratio=ke_ratio, words=ke_words,
scores=ke_scores, pos_filter=ke_pos_filter,
lemmatize=ke_lemmatize))
if feature == 'ShallowParsing':
df[feature] = inputCorpus.apply(lambda x: ShallowParsing(x, lib=posTagsLib))
return df
def __Log( logType="info", text=None):
if logType.lower() == "exception":
logging.exception( text)
elif logEnabled:
if logType.lower() == "info":
logging.info( text)
elif logType.lower() == "debug":
logging.debug( text)
def SentenceTokenize( inputText):
return text_cleaner.SentenceTokenize(inputText)
def WordTokenize( inputText, tokenizationLib = 'nltk'):
return text_cleaner.WordTokenize(inputText, tokenizationLib)
def Lemmatize( inputTokensList, lemmatizationLib = 'nltk'):
return text_cleaner.Lemmatize(inputTokensList, lemmatizationLib)
def Stemmize( inputTokensList):
return text_cleaner.Stemmize(inputTokensList)
def ToLowercase( inputText):
resultText = ""
if inputText is not None and inputText != "":
resultText = inputText.lower()
return resultText
def ToUppercase( inputText):
resultText = ""
if inputText is not None and inputText != '':
resultText = inputText.upper()
return resultText
def RemoveNoise(
inputText,
removeNoise_fHtmlDecode = True,
removeNoise_fRemoveHyperLinks = True,
removeNoise_fRemoveMentions = True,
removeNoise_fRemoveHashtags = True,
removeNoise_RemoveOrReplaceEmoji = 'remove',
removeNoise_fUnicodeToAscii = True,
removeNoise_fRemoveNonAscii = True):
return text_cleaner.RemoveNoise(inputText, removeNoise_fHtmlDecode, removeNoise_fRemoveHyperLinks, removeNoise_fRemoveMentions,
removeNoise_fRemoveHashtags, removeNoise_RemoveOrReplaceEmoji, removeNoise_fUnicodeToAscii, removeNoise_fRemoveNonAscii)
def RemoveStopwords( inputTokensList, stopwordsRemovalLib='nltk', stopwordsList = None, extend_or_replace='extend'):
return text_cleaner.RemoveStopwords(inputTokensList, stopwordsRemovalLib, stopwordsList, extend_or_replace)
def RemoveNumericTokens( inputText, removeNumeric_fIncludeSpecialCharacters=True):
return text_cleaner.RemoveNumericTokens(inputText, removeNumeric_fIncludeSpecialCharacters)
def RemovePunctuation( inputText, fRemovePuncWithinTokens=False):
return text_cleaner.RemovePunctuation(inputText, fRemovePuncWithinTokens)
def CorrectSpelling( inputTokensList):
return text_cleaner.CorrectSpelling(inputTokensList)
def ReplaceAcronym( inputTokensList, acrDict=None):
return text_cleaner.ReplaceAcronym(inputTokensList, acrDict)
def ExpandContractions( inputText, expandContractions_googleNewsWordVectorPath=None):
return text_cleaner.ExpandContractions(inputText)
def get_pretrained_model_path():
try:
from appbe.dataPath import DATA_DIR
modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing'
except:
modelsPath = Path('aion')/'PreTrainedModels'/'TextProcessing'
if not modelsPath.exists():
modelsPath.mkdir(parents=True, exist_ok=True)
return modelsPath
def checkAndDownloadPretrainedModel(preTrainedModel, embedding_size=300):
models = {'glove':{50:'glove.6B.50d.w2vformat.txt',100:'glove.6B.100d.w2vformat.txt',200:'glove.6B.200d.w2vformat.txt',300:'glove.6B.300d.w2vformat.txt'}, 'fasttext':{300:'wiki-news-300d-1M.vec'}}
supported_models = [x for y in models.values() for x in y.values()]
embedding_sizes = {x:y.keys() for x,y in models.items()}
preTrainedModel = preTrainedModel.lower()
if preTrainedModel not in models.keys():
raise ValueError(f'model not supported: {preTrainedModel}')
if embedding_size not in embedding_sizes[preTrainedModel]:
raise ValueError(f"Embedding size '{embedding_size}' not supported for {preTrainedModel}")
selected_model = models[preTrainedModel][embedding_size]
modelsPath = get_pretrained_model_path()
p = modelsPath.glob('**/*')
modelsDownloaded = [x.name for x in p if x.name in supported_models]
if selected_model not in modelsDownloaded:
if preTrainedModel == "glove":
try:
local_file_path = modelsPath/f"glove.6B.{embedding_size}d.w2vformat.txt"
file_test, header_test = urllib.request.urlretrieve(f'https://aion-pretrained-models.s3.ap-south-1.amazonaws.com/text/glove.6B.{embedding_size}d.w2vformat.txt', local_file_path)
except Exception as e:
raise ValueError("Error: unable to download glove pretrained model, please try again or download it manually and placed it at {}. ".format(modelsPath)+str(e))
elif preTrainedModel == "fasttext":
try:
local_file_path = modelsPath/"wiki-news-300d-1M.vec.zip"
url = 'https://aion-pretrained-models.s3.ap-south-1.amazonaws.com/text/wiki-news-300d-1M.vec.zip'
file_test, header_test = urllib.request.urlretrieve(url, local_file_path)
with zipfile.ZipFile(local_file_path) as zip_ref:
zip_ref.extractall(modelsPath)
Path(local_file_path).unlink()
except Exception as e:
raise ValueError("Error: unable to download fastText pretrained model, please try again or download it manually and placed it at {}. ".format(location)+str(e))
return modelsPath/selected_model
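# Usage note (comment only): the helper above resolves and, if needed, downloads the requested
# vectors into the pretrained-models directory, e.g.
#   checkAndDownloadPretrainedModel('glove', 100)  -> <models dir>/glove.6B.100d.w2vformat.txt
#   checkAndDownloadPretrainedModel('fasttext')    -> <models dir>/wiki-news-300d-1M.vec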
def load_pretrained(path):
embeddings = {}
word = ''
with open(path, 'r', encoding="utf8") as f:
header = f.readline()
header = header.split(' ')
vocab_size = int(header[0])
embed_size = int(header[1])
for i in range(vocab_size):
data = f.readline().strip().split(' ')
word = data[0]
embeddings[word] = [float(x) for x in data[1:]]
return embeddings
class TextProcessing(BaseEstimator, TransformerMixin):
def __init__(self,
functionSequence = ['RemoveNoise','ExpandContractions','Normalize','ReplaceAcronym',
'CorrectSpelling','RemoveStopwords','RemovePunctuation','RemoveNumericTokens'],
fRemoveNoise = True,
fExpandContractions = False,
fNormalize = True,
fReplaceAcronym = False,
fCorrectSpelling = False,
fRemoveStopwords = True,
fRemovePunctuation = True,
fRemoveNumericTokens = True,
removeNoise_fHtmlDecode = True,
removeNoise_fRemoveHyperLinks = True,
removeNoise_fRemoveMentions = True,
removeNoise_fRemoveHashtags = True,
removeNoise_RemoveOrReplaceEmoji = 'remove',
removeNoise_fUnicodeToAscii = True,
removeNoise_fRemoveNonAscii = True,
tokenizationLib='nltk',
normalizationMethod = 'Lemmatization',
lemmatizationLib = 'nltk',
acronymDict = None,
stopwordsRemovalLib = 'nltk',
stopwordsList = None,
extend_or_replace_stopwordslist = 'extend',
removeNumeric_fIncludeSpecialCharacters = True,
fRemovePuncWithinTokens = False,
data_path = None
):
global logEnabled
#logEnabled = EnableLogging
self.functionSequence = functionSequence
self.fRemoveNoise = fRemoveNoise
self.fExpandContractions = fExpandContractions
self.fNormalize = fNormalize
self.fReplaceAcronym = fReplaceAcronym
self.fCorrectSpelling = fCorrectSpelling
self.fRemoveStopwords = fRemoveStopwords
self.fRemovePunctuation = fRemovePunctuation
self.fRemoveNumericTokens = fRemoveNumericTokens
self.removeNoise_fHtmlDecode = removeNoise_fHtmlDecode
self.removeNoise_fRemoveHyperLinks = removeNoise_fRemoveHyperLinks
self.removeNoise_fRemoveMentions = removeNoise_fRemoveMentions
self.removeNoise_fRemoveHashtags = removeNoise_fRemoveHashtags
self.removeNoise_RemoveOrReplaceEmoji = removeNoise_RemoveOrReplaceEmoji
self.removeNoise_fUnicodeToAscii = removeNoise_fUnicodeToAscii
self.removeNoise_fRemoveNonAscii = removeNoise_fRemoveNonAscii
self.tokenizationLib = tokenizationLib
self.normalizationMethod = normalizationMethod
self.lemmatizationLib = lemmatizationLib
self.acronymDict = acronymDict
self.stopwordsRemovalLib = stopwordsRemovalLib
self.stopwordsList = stopwordsList
self.extend_or_replace_stopwordslist = extend_or_replace_stopwordslist
self.removeNumeric_fIncludeSpecialCharacters = removeNumeric_fIncludeSpecialCharacters
self.fRemovePuncWithinTokens = fRemovePuncWithinTokens
self.data_path = data_path
self.fit_and_transformed_ = False
def fit(self, x, y=None):
return self
def transform(self, x):
x = map(lambda inputText: text_cleaner.cleanText(inputText, functionSequence = self.functionSequence, fRemoveNoise = self.fRemoveNoise, fExpandContractions = self.fExpandContractions, fNormalize = self.fNormalize, fReplaceAcronym = self.fReplaceAcronym, fCorrectSpelling = self.fCorrectSpelling, fRemoveStopwords = self.fRemoveStopwords, fRemovePunctuation = self.fRemovePunctuation, fRemoveNumericTokens = self.fRemoveNumericTokens, removeNoise_fHtmlDecode = self.removeNoise_fHtmlDecode, removeNoise_fRemoveHyperLinks = self.removeNoise_fRemoveHyperLinks, removeNoise_fRemoveMentions = self.removeNoise_fRemoveMentions , removeNoise_fRemoveHashtags = self.removeNoise_fRemoveHashtags, removeNoise_RemoveOrReplaceEmoji = self.removeNoise_RemoveOrReplaceEmoji, removeNoise_fUnicodeToAscii = self.removeNoise_fUnicodeToAscii, removeNoise_fRemoveNonAscii = self.removeNoise_fRemoveNonAscii, tokenizationLib = self.tokenizationLib, normalizationMethod = self.normalizationMethod, lemmatizationLib = self.lemmatizationLib, acronymDict = self.acronymDict, stopwordsRemovalLib = self.stopwordsRemovalLib, stopwordsList = self.stopwordsList, extend_or_replace_stopwordslist = self.extend_or_replace_stopwordslist, removeNumeric_fIncludeSpecialCharacters = self.removeNumeric_fIncludeSpecialCharacters, fRemovePuncWithinTokens = self.fRemovePuncWithinTokens), x)
x = pd.Series(list(x))
if hasattr(self, 'fit_and_transformed_') and not self.fit_and_transformed_:
self.fit_and_transformed_ = True
if self.data_path and Path(self.data_path).exists():
x.to_csv(Path(self.data_path)/'text_cleaned.csv', index=False)
return x
def get_feature_names_out(self):
return ['tokenize']
class wordEmbedding(BaseEstimator, TransformerMixin):
def __init__(self, preTrainedModel, embeddingSize=300,external_model=None,external_model_type='binary'):
self.number_of_features = 0
self.embeddingSize = embeddingSize
self.preTrainedModel = preTrainedModel.lower()
self.external_model=external_model
self.external_model_type = external_model_type
if self.preTrainedModel == "glove":
self.preTrainedModelpath = f'glove.6B.{self.embeddingSize}d.w2vformat.txt'
self.binary = False
elif self.preTrainedModel == "fasttext":
self.preTrainedModelpath = 'wiki-news-300d-1M.vec'
self.binary = False
else:
raise ValueError(f'Model ({self.preTrainedModel}) not supported')
def fit(self, x, y=None):
return self
def transform(self, x):
if ((isinstance(self.external_model, pd.DataFrame) and not self.external_model.empty) or (not isinstance(self.external_model, pd.DataFrame) and self.external_model)):
if self.preTrainedModel == "fasttext" and self.external_model_type == 'binary':
print('Transforming using external binary')
extracted = np.vstack([self.external_model.get_sentence_vector( sentense) for sentense in x])
else:
print('Transforming using external vector')
extracted = extractFeatureUsingPreTrainedModel(x, pretrainedModelPath=None, loaded_model=self.external_model, embed_size=300)
else:
print('Transforming using Vector')
models_path = checkAndDownloadPretrainedModel(self.preTrainedModel, self.embeddingSize)
extracted = extractFeatureUsingPreTrainedModel(x, models_path)
self.number_of_features = extracted.shape[1]
return extracted
def get_feature_names_out(self):
return [str(x) for x in range(self.number_of_features)]
def get_feature_names(self):
return self.get_feature_names_out()
def getProcessedPOSTaggedData(pos_tagged_data):
def get_wordnet_post(tag):
if tag.startswith('V'):
return wordnet.VERB
elif tag.startswith('J'):
return wordnet.ADJ
elif tag.startswith('R'):
return wordnet.ADV
else:
return wordnet.NOUN
def process_pos_tagged_data(text):
processed_text = [f"{t[0]}_{get_wordnet_post(t[1])}" for t in text]
processed_text = " ".join(processed_text)
return processed_text
processed_pos_tagged_data = pos_tagged_data.apply(process_pos_tagged_data)
return processed_pos_tagged_data
class PosTagging(BaseEstimator, TransformerMixin):
def __init__(self, posTagsLib, data_path):
self.posTagsLib = posTagsLib
self.fit_and_transformed_ = False
self.data_path = data_path
def fit(self, x, y=None):
return self
def transform(self, x):
parsing_output = SyntacticAndEntityParsing(x, featuresList=['POSTags'], posTagsLib=self.posTagsLib)
output = getProcessedPOSTaggedData(parsing_output['POSTags'])
if not self.fit_and_transformed_:
self.fit_and_transformed_ = True
if self.data_path and Path(self.data_path).exists():
output.to_csv(Path(self.data_path)/'pos_tagged.csv', index=False)
return output
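# Hedged usage sketch (not part of the original module). Shows how the cleaner and the count
# vectorizer defined above can be chained, mirroring the way the AION text profiler composes
# them; requires the nltk corpora (punkt, stopwords, wordnet, averaged_perceptron_tagger).
if __name__ == "__main__":
    from sklearn.pipeline import Pipeline
    corpus = pd.Series(["AION cleans raw text!", "Tokens are lemmatized and vectorized."])
    demo_pipeline = Pipeline([("clean", TextProcessing()), ("vectorize", ExtractFeatureCountVectors())])
    features = demo_pipeline.fit_transform(corpus)
    print(features.shape)   # (number of documents, vocabulary size)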
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import re
import string
import sys
import demoji
#demoji.download_codes()
import nltk
import spacy
from nltk.corpus import stopwords
from bs4 import BeautifulSoup
from text_unidecode import unidecode
from textblob import TextBlob
from spellchecker import SpellChecker
from nltk import pos_tag
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.corpus import wordnet
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
from spacy.lang.en import English
from collections import defaultdict
import contractions
spacy_nlp = None
def WordTokenize( inputText, tokenizationLib = 'nltk'):
tokenList = []
if inputText is not None and inputText != "":
tokenizationLib = tokenizationLib.lower()
if tokenizationLib == 'nltk':
tokenList = word_tokenize(inputText)
elif tokenizationLib == 'textblob':
tbObj = TextBlob(inputText)
tokenList = tbObj.words
elif tokenizationLib == 'spacy':
nlp = English()
nlpDoc = nlp(inputText)
for token in nlpDoc:
tokenList.append(token.text)
elif tokenizationLib == 'keras':
from tensorflow.keras.preprocessing.text import text_to_word_sequence
tokenList = text_to_word_sequence(inputText)
else:
tokenList = word_tokenize(inputText)
return tokenList
def SentenceTokenize( inputText):
sentenceList = []
if inputText is not None and inputText != "":
sentenceList = sent_tokenize(inputText)
return sentenceList
def Lemmatize(inputTokensList, lemmatizationLib = 'nltk'):
global spacy_nlp
lemmatized_list= []
lemmatizationLib = lemmatizationLib.lower()
if (inputTokensList is not None) and (len(inputTokensList)!=0):
if (lemmatizationLib == 'textblob'):
inputText = " ".join(inputTokensList)
sent = TextBlob(inputText)
tag_dict = {"J": 'a',
"N": 'n',
"V": 'v',
"R": 'r'}
words_and_tags = [(w, tag_dict.get(pos[0], 'n')) for w, pos in sent.tags]
lemmatized_list = [wd.lemmatize(tag) for wd, tag in words_and_tags]
elif (lemmatizationLib == 'spacy'):
inputText = " ".join(inputTokensList)
if spacy_nlp == None:
spacy_nlp = spacy.load('en_core_web_sm')
doc = spacy_nlp(inputText)
for token in doc:
if token.text != token.lemma_:
if token.lemma_ != "-PRON-":
lemmatized_list.append(token.lemma_)
else:
lemmatized_list.append(token.text)
else:
lemmatized_list.append(token.text)
else:
tag_map = defaultdict(lambda : wordnet.NOUN)
tag_map['J'] = wordnet.ADJ
tag_map['V'] = wordnet.VERB
tag_map['R'] = wordnet.ADV
wnLemmatizer = WordNetLemmatizer()
token_tags = pos_tag(inputTokensList)
lemmatized_list = [wnLemmatizer.lemmatize(token, tag_map[tag[0]]) for token, tag in token_tags]
return lemmatized_list
def Stemmize(inputTokensList):
stemmedTokensList= []
if (inputTokensList is not None) and (len(inputTokensList)!=0):
porterStemmer = PorterStemmer()
stemmedTokensList = [porterStemmer.stem(token) for token in inputTokensList]
return stemmedTokensList
def ToLowercase(inputText):
resultText = ""
if inputText is not None and inputText != "":
resultText = inputText.lower()
return resultText
def ToUppercase(inputText):
resultText = ""
if inputText is not None and inputText != '':
resultText = inputText.upper()
return resultText
def RemoveNoise(inputText,
removeNoise_fHtmlDecode = True,
removeNoise_fRemoveHyperLinks = True,
removeNoise_fRemoveMentions = True,
removeNoise_fRemoveHashtags = True,
removeNoise_RemoveOrReplaceEmoji = 'remove',
removeNoise_fUnicodeToAscii = True,
removeNoise_fRemoveNonAscii = True):
if inputText is not None and inputText != "":
if removeNoise_fHtmlDecode == True:
inputText = BeautifulSoup(inputText, "html.parser").text
if removeNoise_fRemoveHyperLinks == True:
inputText = re.sub(r'https?:\\/\\/\\S*', '', inputText, flags=re.MULTILINE)
if removeNoise_fRemoveMentions == True:
inputText = re.sub('[@]+\\S+','', inputText)
if removeNoise_fRemoveHashtags == True:
inputText = re.sub('[#]+\\S+','', inputText)
if removeNoise_RemoveOrReplaceEmoji == 'remove':
inputText = demoji.replace(inputText, "")
elif removeNoise_RemoveOrReplaceEmoji == 'replace':
inputText = demoji.replace_with_desc(inputText, " ")
if removeNoise_fUnicodeToAscii == True:
inputText = unidecode(inputText)
if removeNoise_fRemoveNonAscii == True:
inputText= re.sub(r'[^\\x00-\\x7F]+',' ', inputText)
inputText = re.sub(r'\\s+', ' ', inputText)
inputText = inputText.strip()
return inputText
def RemoveStopwords(inputTokensList, stopwordsRemovalLib='nltk', stopwordsList = None, extend_or_replace='extend'):
resultTokensList = []
if (inputTokensList is not None) and (len(inputTokensList)!=0):
stopwordsRemovalLib= stopwordsRemovalLib.lower()
if stopwordsRemovalLib == 'spacy':
nlp = English()
stopwordRemovalList = nlp.Defaults.stop_words
else:
stopwordRemovalList = set(stopwords.words('english'))
if extend_or_replace == 'replace':
if stopwordsList is not None:
stopwordRemovalList = set(stopwordsList)
else:
if stopwordsList:
stopwordRemovalList = stopwordRemovalList.union(set(stopwordsList))
resultTokensList = [word for word in inputTokensList if word not in stopwordRemovalList]
return resultTokensList
def RemoveNumericTokens(inputText, removeNumeric_fIncludeSpecialCharacters=True):
resultText = ""
if inputText is not None and inputText != "":
if removeNumeric_fIncludeSpecialCharacters == True:
#Remove tokens having numbers and punctuations
resultText = re.sub(r'\\b\\d+[^a-zA-Z]*\\d*\\b',' ', inputText)
else:
#Remove only numeric tokens
resultText = re.sub(r'\\b\\d+\\b','', inputText)
# convert consecutive whitespaces to single space in the results
resultText = re.sub(r'\\s+', ' ', resultText)
return resultText
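# Worked example (comment only, default flags): tokens that contain digits are dropped in full,
# e.g. RemoveNumericTokens("delivery in 3-5 days costs 12.50 euro")
#      -> "delivery in days costs euro"
# With removeNumeric_fIncludeSpecialCharacters=False only pure digit runs are removed.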
def RemovePunctuation(inputText, fRemovePuncWithinTokens=False):
resultText = ""
if inputText is not None and len(inputText) != 0:
if fRemovePuncWithinTokens == True:
resultText = inputText.translate(str.maketrans("","", string.punctuation))
else:
punctuationList = list(string.punctuation)
tokensList = WordTokenize(inputText)
resultTokensList = [word for word in tokensList if word not in punctuationList]
resultText = " ".join(resultTokensList)
resultText = re.sub(r'\\s+', ' ', resultText)
return resultText
def CorrectSpelling(inputTokensList):
correctedTokensList = []
if (inputTokensList is not None) and (len(inputTokensList)!=0):
spell = SpellChecker()
for word in inputTokensList:
word = word.lower()
if word not in spell:
word = spell.correction(word)
if word:
correctedTokensList.append(word)
return correctedTokensList
def ReplaceAcronym(inputTokensList, acrDict=None):
resultTokensList = []
if (inputTokensList is not None) and (len(inputTokensList)!=0):
if ((acrDict is not None) and (len(acrDict) != 0)):
acrDictLowercase = dict((key.lower(), value.lower()) for key, value in acrDict.items())
resultTokensList = [acrDictLowercase.get(token.lower(), token.lower()) for token in inputTokensList]
else:
resultTokensList = inputTokensList
return resultTokensList
def ExpandContractions(inputText):
resultText = ""
if inputText != '':
resultText = contractions.fix(inputText)
return resultText
def cleanText( inputText,
functionSequence = ['RemoveNoise','ExpandContractions','Normalize','ReplaceAcronym',
'CorrectSpelling','RemoveStopwords','RemovePunctuation','RemoveNumericTokens'],
fRemoveNoise = True,
fExpandContractions = False,
fNormalize = True,
fReplaceAcronym = False,
fCorrectSpelling = False,
fRemoveStopwords = True,
fRemovePunctuation = True,
fRemoveNumericTokens = True,
removeNoise_fHtmlDecode = True,
removeNoise_fRemoveHyperLinks = True,
removeNoise_fRemoveMentions = True,
removeNoise_fRemoveHashtags = True,
removeNoise_RemoveOrReplaceEmoji = 'remove',
removeNoise_fUnicodeToAscii = True,
removeNoise_fRemoveNonAscii = True,
tokenizationLib='nltk',
normalizationMethod = 'Lemmatization',
lemmatizationLib = 'nltk',
acronymDict = None,
stopwordsRemovalLib = 'nltk',
stopwordsList = None,
extend_or_replace_stopwordslist = 'extend',
removeNumeric_fIncludeSpecialCharacters = True,
fRemovePuncWithinTokens = False
):
if inputText is not None and inputText != "":
for function in functionSequence:
if function == 'RemoveNoise':
if (fRemoveNoise == True):
inputText = RemoveNoise(inputText,
removeNoise_fHtmlDecode,
removeNoise_fRemoveHyperLinks,
removeNoise_fRemoveMentions,
removeNoise_fRemoveHashtags,
removeNoise_RemoveOrReplaceEmoji,
removeNoise_fUnicodeToAscii,
removeNoise_fRemoveNonAscii)
if function == 'ExpandContractions':
if (fExpandContractions == True):
inputText = ExpandContractions(inputText)
if function == 'Normalize':
if (fNormalize == True):
inputTokens = WordTokenize(inputText, tokenizationLib)
if (normalizationMethod == 'Stemming'):
inputTokens = Stemmize(inputTokens)
else:
inputTokens = Lemmatize(inputTokens, lemmatizationLib)
inputText = " ".join(inputTokens)
if function == 'ReplaceAcronym':
if fReplaceAcronym == True and (acronymDict is not None) and acronymDict != 'None':
inputText = ToLowercase(inputText)
inputTokens = WordTokenize(inputText, tokenizationLib)
inputTokens= ReplaceAcronym(inputTokens, acronymDict)
inputText = " ".join(inputTokens)
if function == 'CorrectSpelling':
if (fCorrectSpelling == True):
try:
inputTokens = WordTokenize(inputText, tokenizationLib)
inputTokens = CorrectSpelling(inputTokens)
inputText = " ".join(inputTokens)
except Exception as e:
print(e)
pass
if function == 'RemoveStopwords':
if (fRemoveStopwords == True):
inputText = ToLowercase(inputText)
inputTokens = WordTokenize(inputText, tokenizationLib)
inputTokens = RemoveStopwords(inputTokens, stopwordsRemovalLib, stopwordsList, extend_or_replace_stopwordslist)
inputText = " ".join(inputTokens)
if function == 'RemovePunctuation':
if (fRemovePunctuation == True):
inputText = RemovePunctuation(inputText, fRemovePuncWithinTokens)
if function == 'RemoveNumericTokens':
if (fRemoveNumericTokens == True):
inputText = RemoveNumericTokens(inputText, removeNumeric_fIncludeSpecialCharacters)
inputText = ToLowercase(inputText)
return inputText
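# Hedged usage sketch (not part of the original module). Assumes the nltk data used above
# (punkt, stopwords, wordnet, averaged_perceptron_tagger) has already been downloaded.
if __name__ == "__main__":
    sample = "Check out https://example.com <b>AION</b> cleans 123 noisy tokens!!!"
    print(cleanText(sample))   # e.g. "check aion clean noisy token"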
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import openai
import tiktoken
import numpy as np
import pandas as pd
from pathlib import Path
from openai.embeddings_utils import get_embedding
from sklearn.base import BaseEstimator, TransformerMixin
class embedding(BaseEstimator, TransformerMixin):
def __init__(self, embedding_engine='Text-Embedding', embedding_ctx_size=8191, encoding_method='cl100k_base'):
self.embedding_engine = embedding_engine
self.embedding_ctx_size = embedding_ctx_size
self.encoding_method = encoding_method
self.number_of_features = 1536
def fit(self,X,y=None):
return self
def transform(self, X):
setup_openai()
X = map(lambda text: self.len_safe_get_embedding( text), X)
return list(X)
def split_large_text(self, large_text):
encoding = tiktoken.get_encoding( self.encoding_method)
tokenized_text = encoding.encode(large_text)
chunks = []
current_chunk = []
current_length = 0
for token in tokenized_text:
current_chunk.append(token)
current_length += 1
if current_length >= self.embedding_ctx_size:
chunks.append(encoding.decode(current_chunk).rstrip(' .,;'))
current_chunk = []
current_length = 0
if current_chunk:
chunks.append(encoding.decode(current_chunk).rstrip(' .,;'))
return chunks
def len_safe_get_embedding(self, text):
chunk_embeddings = []
chunk_lens = []
for chunk in self.split_large_text(text):
chunk_embeddings.append( get_embedding(chunk, engine=self.embedding_engine))
chunk_lens.append(len(chunk))
chunk_embeddings = np.average(chunk_embeddings, axis=0, weights=chunk_lens)  # weight each chunk by its token count
chunk_embeddings = chunk_embeddings / np.linalg.norm(chunk_embeddings) # normalizes length to 1
chunk_embeddings = chunk_embeddings.tolist()
return chunk_embeddings
def get_feature_names_out(self):
return [str(x) for x in range(self.number_of_features)]
def get_feature_names(self):
return self.get_feature_names_out()
"""
Open AI initialization has to be done separately as follows:
1. During training read the parameters from user
a. from config
b. SQLite database
c. From Json file
"""
class setup_openai():
def __init__( self, config=None):
param_keys = ['api_type','api_key','api_base','api_version']
if isinstance(config, dict):
valid_params = {x:y for x,y in config.items() if x in param_keys}
self._update_params(valid_params)
elif self._is_sqlite():
self._update_params( self._get_cred_from_sqlite())
elif ((Path(__file__).parent.parent/'etc')/'openai.json').exists():
with open(((Path(__file__).parent.parent/'etc')/'openai.json'), 'r') as f:
import json
params = json.load(f)
valid_params = {x:y for x,y in params.items() if x in param_keys}
self._update_params(valid_params)
else:
raise ValueError('Open AI credentials are not provided.')
def _is_sqlite(self):
try:
from AION.appbe.sqliteUtility import sqlite_db
from AION.appbe.dataPath import DATA_DIR
db_dir = Path(DATA_DIR)/'sqlite'
db_file = 'config.db'
if (db_dir/db_file).exists():
sqlite_obj = sqlite_db(db_dir,db_file)
if sqlite_obj.table_exists('openai'):
return True
return False
except:
return False
def _get_cred_from_sqlite(self):
from AION.appbe.sqliteUtility import sqlite_db
from AION.appbe.dataPath import DATA_DIR
db_dir = Path(DATA_DIR)/'sqlite'
db_file = 'config.db'
sqlite_obj = sqlite_db(db_dir,db_file)
data = sqlite_obj.read_data('openai')[0]
param_keys = ['api_type','api_key','api_base','api_version']
return dict((x,y) for x,y in zip(param_keys,data))
def _update_params(self, valid_params):
for key, value in valid_params.items():
if key == 'api_type':
openai.api_type = value
elif key == 'api_key':
openai.api_key = value
elif key == 'api_base':
openai.api_base = value
elif key == 'api_version':
openai.api_version = value
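# Hedged usage sketch (comment only; all credential values below are placeholders). transform()
# re-reads the credentials through setup_openai(), so in practice they come from the SQLite
# config table or the etc/openai.json file described above:
#   setup_openai({'api_type': 'azure',
#                 'api_key': '<your-key>',
#                 'api_base': 'https://<your-resource>.openai.azure.com/',
#                 'api_version': '2023-05-15'})
#   vectors = embedding().transform(["AION builds text embeddings through the OpenAI API."])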
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import logging
from distutils.util import strtobool
import numpy as np
import pandas as pd
from text import TextProcessing
from sklearn.preprocessing import FunctionTransformer
from sklearn.base import BaseEstimator, TransformerMixin
from pathlib import Path
external_model = None
external_model_type = None
def get_one_true_option(d, default_value):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
class textProfiler():
def __init__(self):
self.log = logging.getLogger('eion')
self.embedder = None
self.bert_embedder_size = 0
def textCleaning(self, textCorpus):
textProcessor = TextProcessing.TextProcessing()
textCorpus = textProcessor.transform(textCorpus)
return(textCorpus)
def sentense_encode(self, item):
return self.model.encode(item,show_progress_bar=False)
def get_embedding_size(self, model, config):
if model in config.keys():
config = config[model]
else:
config = {}
model = model.lower()
if model == 'glove':
size_map = {'default': 100, '50d': 50, '100d':100, '200d': 200, '300d':300}
size_enabled = get_one_true_option(config, 'default')
return size_map[size_enabled]
elif model == 'fasttext':
size_map = {'default': 300}
size_enabled = get_one_true_option(config, 'default')
return size_map[size_enabled]
elif model == 'latentsemanticanalysis':
size_map = {'default': 100, '50d': 50 |
, '100d':100, '200d': 200, '300d':300,'500d':500,'700d':700,'1000d':1000}
size_enabled = get_one_true_option(config, 'default')
return size_map[size_enabled]
elif model in ['tf_idf', 'countvectors']:
return int(config.get('maxFeatures', 2000))
else: # for word2vec
return 300
def cleaner(self, conf_json, pipeList, data_path=None):
cleaning_kwargs = {}
textCleaning = conf_json.get('textCleaning')
self.log.info("Text Preprocessing config: ",textCleaning)
cleaning_kwargs['fRemoveNoise'] = strtobool(textCleaning.get('removeNoise', 'True'))
cleaning_kwargs['fNormalize'] = strtobool(textCleaning.get('normalize', 'True'))
cleaning_kwargs['fReplaceAcronym'] = strtobool(textCleaning.get('replaceAcronym', 'False'))
cleaning_kwargs['fCorrectSpelling'] = strtobool(textCleaning.get('correctSpelling', 'False'))
cleaning_kwargs['fRemoveStopwords'] = strtobool(textCleaning.get('removeStopwords', 'True'))
cleaning_kwargs['fRemovePunctuation'] = strtobool(textCleaning.get('removePunctuation', 'True'))
cleaning_kwargs['fRemoveNumericTokens'] = strtobool(textCleaning.get('removeNumericTokens', 'True'))
cleaning_kwargs['normalizationMethod'] = get_one_true_option(textCleaning.get('normalizeMethod'),
'lemmatization').capitalize()
removeNoiseConfig = textCleaning.get('removeNoiseConfig')
if type(removeNoiseConfig) is dict:
cleaning_kwargs['removeNoise_fHtmlDecode'] = strtobool(removeNoiseConfig.get('decodeHTML', 'True'))
cleaning_kwargs['removeNoise_fRemoveHyperLinks'] = strtobool(removeNoiseConfig.get('removeHyperLinks', 'True'))
cleaning_kwargs['removeNoise_fRemoveMentions'] = strtobool(removeNoiseConfig.get('removeMentions', 'True'))
cleaning_kwargs['removeNoise_fRemoveHashtags'] = strtobool(removeNoiseConfig.get('removeHashtags', 'True'))
cleaning_kwargs['removeNoise_RemoveOrReplaceEmoji'] = 'remove' if strtobool(removeNoiseConfig.get('removeEmoji', 'True')) else 'replace'
cleaning_kwargs['removeNoise_fUnicodeToAscii'] = strtobool(removeNoiseConfig.get('unicodeToAscii', 'True'))
cleaning_kwargs['removeNoise_fRemoveNonAscii'] = strtobool(removeNoiseConfig.get('removeNonAscii', 'True'))
acronymConfig = textCleaning.get('acronymConfig')
if type(acronymConfig) is dict:
cleaning_kwargs['acronymDict'] = acronymConfig.get('acronymDict', None)
stopWordsConfig = textCleaning.get('stopWordsConfig')
if type(stopWordsConfig) is dict:
cleaning_kwargs['stopwordsList'] = stopWordsConfig.get('stopwordsList', '[]')
if isinstance(cleaning_kwargs['stopwordsList'], str):
if cleaning_kwargs['stopwordsList'] != '[]':
cleaning_kwargs['stopwordsList'] = cleaning_kwargs['stopwordsList'][1:-1].split(',')
else:
cleaning_kwargs['stopwordsList'] = []
cleaning_kwargs['extend_or_replace_stopwordslist'] = 'replace' if strtobool(stopWordsConfig.get('replace', 'True')) else 'extend'
removeNumericConfig = textCleaning.get('removeNumericConfig')
if type(removeNumericConfig) is dict:
cleaning_kwargs['removeNumeric_fIncludeSpecialCharacters'] = strtobool(removeNumericConfig.get('removeNumeric_IncludeSpecialCharacters', 'True'))
removePunctuationConfig = textCleaning.get('removePunctuationConfig')
if type(removePunctuationConfig) is dict:
cleaning_kwargs['fRemovePuncWithinTokens'] = strtobool(removePunctuationConfig.get('removePuncWithinTokens', 'False'))
cleaning_kwargs['fExpandContractions'] = strtobool(textCleaning.get('expandContractions', 'False'))
libConfig = textCleaning.get('libConfig')
if type(libConfig) is dict:
cleaning_kwargs['tokenizationLib'] = get_one_true_option(libConfig.get('tokenizationLib'), 'nltk')
cleaning_kwargs['lemmatizationLib'] = get_one_true_option(libConfig.get('lemmatizationLib'), 'nltk')
cleaning_kwargs['stopwordsRemovalLib'] = get_one_true_option(libConfig.get('stopwordsRemovalLib'), 'nltk')
if data_path:
cleaning_kwargs['data_path'] = data_path
textProcessor = TextProcessing.TextProcessing(**cleaning_kwargs)
pipeList.append(("TextProcessing",textProcessor))
textFeatureExtraction = conf_json.get('textFeatureExtraction')
if strtobool(textFeatureExtraction.get('pos_tags', 'False')):
pos_tags_lib = get_one_true_option(textFeatureExtraction.get('pos_tags_lib'), 'nltk')
posTagger = TextProcessing.PosTagging( pos_tags_lib, data_path)
pipeList.append(("posTagger",posTagger))
return pipeList
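# Illustrative 'textCleaning' config fragment (a sketch; the key names mirror the ones read
# above, while the nested shape of normalizeMethod/libConfig is an assumption about how
# get_one_true_option() picks the single enabled option):
#   "textCleaning": {
#       "removeNoise": "True", "normalize": "True", "replaceAcronym": "False",
#       "correctSpelling": "False", "removeStopwords": "True",
#       "removePunctuation": "True", "removeNumericTokens": "True",
#       "normalizeMethod": {"lemmatization": "True", "stemming": "False"},
#       "libConfig": {"tokenizationLib": {"nltk": "True"}, "lemmatizationLib": {"nltk": "True"},
#                     "stopwordsRemovalLib": {"nltk": "True"}}
#   }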
def embedding(self, conf_json, pipeList):
ngram_min = 1
ngram_max = 1
textFeatureExtraction = conf_json.get('textFeatureExtraction')
if strtobool(textFeatureExtraction.get('n_grams', 'False')):
n_grams_config = textFeatureExtraction.get("n_grams_config")
ngram_min = int(n_grams_config.get('min_n', 1))
ngram_max = int(n_grams_config.get('max_n', 1))
if (ngram_min < 1) or ngram_min > ngram_max:
ngram_min = 1
ngram_max = 1
invalidNgramWarning = 'WARNING : invalid ngram config.\\nUsing the default values min_n={}, max_n={}'.format(ngram_min, ngram_max)
self.log.info(invalidNgramWarning)
ngram_range_tuple = (ngram_min, ngram_max)
textConversionMethod = conf_json.get('textConversionMethod')
conversion_method = get_one_true_option(textConversionMethod, None)
embedding_size_config = conf_json.get('embeddingSize', {})
embedding_size = self.get_embedding_size(conversion_method, embedding_size_config)
if conversion_method.lower() == "countvectors":
vectorizer = TextProcessing.ExtractFeatureCountVectors( ngram_range=ngram_range_tuple,max_features=embedding_size)
pipeList.append(("vectorizer",vectorizer))
self.log.info('----------> Conversion Method: CountVectors')
elif conversion_method.lower() in ["fasttext","glove"]:
embedding_method = conversion_method
wordEmbeddingVectorizer = TextProcessing.wordEmbedding(embedding_method, embedding_size)
pipeList.append(("vectorizer",wordEmbeddingVectorizer))
self.log.info('----------> Conversion Method: '+str(conversion_method))
elif conversion_method.lower() == "openai":
from text.openai_embedding import embedding as openai_embedder
vectorizer = openai_embedder()
pipeList.append(("vectorizer",vectorizer))
self.log.info('----------> Conversion Method: '+str(conversion_method))
elif conversion_method.lower() == "sentencetransformer_distilroberta":
from sentence_transformers import SentenceTransformer
embedding_pretrained = {'model':'sentence-transformers/msmarco-distilroberta-base-v2','size': 768}
self.bert_embedder_size = embedding_pretrained['size']
self.model = SentenceTransformer(embedding_pretrained['model'])
self.embedder = FunctionTransformer(self.sentense_encode, feature_names_out = self.sentence_transformer_output)
pipeList.append(("vectorizer",self.embedder))
self.log.info('----------> Conversion Method: SentenceTransformer using msmarco_distilroberta')
elif conversion_method.lower() == "sentencetransformer_minilm":
from sentence_transformers import SentenceTransformer
embedding_pretrained = {'model':'sentence-transformers/all-MiniLM-L6-v2','size': 384}
self.bert_embedder_size = embedding_pretrained['size']
self.model = SentenceTransformer(embedding_pretrained['model'])
self.embedder = FunctionTransformer(self.sentense_encode, feature_names_out = self.sentence_transformer_output)
pipeList.append(("vectorizer",self.embedder))
self.log.info('----------> Conversion Method: SentenceTransformer using MiniLM-L6-v2')
elif conversion_method.lower() == "sentencetransformer_mpnet":
from sentence_transformers import SentenceTransformer
embedding_pretrained = {'model':'sentence-transformers/all-mpnet-base-v2','size': 768}
self.bert_embedder_size = embedding_pretrained['size']
self.model = SentenceTransformer(embedding_pretrained['model'])
self.embedder = FunctionTransformer(self.sentense_encode, feature_names_out = self.sentence_transformer_output)
pipeList.append(("vectorizer",self.embedder))
self.log.info('----------> Conversion Method: SentenceTransformer using mpnet-base-v2')
elif conversion_method.lower() == 'latentsemanticanalysis':
vectorizer = TextProcessing.ExtractFeatureTfIdfVectors(ngram_range=ngram_range_tuple)
pipeList.append(("vectorizer",vectorizer))
self.log.info('----------> Conversion Method: latentsemanticanalysis')
elif conversion_method.lower() == 'tf_idf':
vectorizer = TextProcessing.ExtractFeatureTfIdfVectors(ngram_range=ngram_range_tuple,max_features=embedding_size)
pipeList.append(("vectorizer",vectorizer))
self.log.info('----------> Conversion Method: TF_IDF')
else:
df1 = pd.DataFrame()
#df1['tokenize'] = textCorpus
self.log.info('----------> Conversion Method: '+str(conversion_method))
return pipeList
def sentence_transformer_output(self, transformer, names=None):
return [str(x) for x in range(self.bert_embedder_size)]
class textCombine(TransformerMixin):
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X):
if X.shape[1] > 1:
return np.array([" ".join(i) for i in X])
else:
if isinstance(X, np.ndarray):
return np.ndarray.flatten(X)
else:
return X
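# Illustrative usage of textCombine (a sketch, not executed at import time): rows with more
# than one text column are joined into a single document, single-column arrays are flattened.
#   combiner = textCombine()
#   combiner.transform(np.array([['good', 'movie'], ['weak', 'plot']]))
#   # -> array(['good movie', 'weak plot'])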
def get_pretrained_model_path():
try:
from appbe.dataPath import DATA_DIR
modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing'
except:
modelsPath = Path('aion')/'PreTrainedModels'/'TextProcessing'
if not modelsPath.exists():
modelsPath.mkdir(parents=True, exist_ok=True)
return modelsPath
def set_pretrained_model(pipe):
from text.Embedding import load_pretrained
import importlib.util
global external_model
global external_model_type
params = pipe.get_params()
model_name = params.get('text_process__vectorizer__preTrainedModel', None)
if model_name and model_name.lower() in ['fasttext','glove'] and not external_model:
if model_name == 'fasttext' and importlib.util.find_spec('fasttext'):
import fasttext
import fasttext.util
cwd = os.getcwd()
os.chdir(get_pretrained_model_path())
fasttext.util.download_model('en', if_exists='ignore')
external_model = fasttext.load_model('cc.en.300.bin')
os.chdir(cwd)
external_model_type = 'binary'
print('loaded fasttext binary')
else:
model_path = TextProcessing.checkAndDownloadPretrainedModel(model_name)
embed_size, external_model = load_pretrained(model_path)
external_model_type = 'vector'
print(f'loaded {model_name} vector')
pipe.set_params(text_process__vectorizer__external_model = external_model)
pipe.set_params(text_process__vectorizer__external_model_type = external_model_type)
def reset_pretrained_model(pipe, clear_mem=True):
global external_model
global external_model_type
params = pipe.get_params()
is_external_model = params.get('text_process__vectorizer__external_model', None)
if (isinstance(is_external_model, pd.DataFrame) and not is_external_model.empty) or is_external_model:
pipe.set_params(text_process__vectorizer__external_model = None)
pipe.set_params(text_process__vectorizer__external_model_type = None)
if clear_mem:
external_model = None
def release_pretrained_model():
global external_model
global external_model_type
external_model = None
external_model_type = None
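# Illustrative life-cycle of the module-level pretrained embedding cache (a sketch; 'pipe'
# stands for the fitted sklearn Pipeline exposing the text_process__vectorizer__* params):
#   set_pretrained_model(pipe)        # load fasttext/glove vectors once and inject them
#   predictions = pipe.predict(df)    # vectorizer consumes the injected external_model
#   reset_pretrained_model(pipe)      # detach the heavy model before pickling the pipeline
#   release_pretrained_model()        # drop the cached vectors from memory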
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import sys
import logging
from collections import Counter
import spacy
import numpy as np
import pandas as pd
import nltk
from nltk.corpus import stopwords
from nltk import pos_tag
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from textblob import TextBlob
from sklearn.feature_extraction.text import CountVectorizer
'''
nltk.download("punkt")
nltk.download("wordnet")
'''
stopWords = stopwords.words("english")
class ExploreTextData:
def __init__(self, logEnabled=False):
self.logEnabled = logEnabled
def __Log(self, logType="info", text=None):
if logType.lower() == "exception":
logging.exception( text)
elif self.logEnabled:
if logType.lower() == "info":
logging.info( text)
elif logType.lower() == "debug":
logging.debug( text)
def Describe(self, inputCorpus):
""" Generate descriptive statistics for length of documents.
Parameters
----------
inputCorpus: sequence of input documents where each document consists of paragraphs or sentences
Returns
-------
dict
Summary statistics of the Series or Dataframe provided.
"""
try:
self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
stat = {}
word_count = self.DocumentWordCount(inputCorpus)
stat['count'] = float(len(word_count))
stat['mean'] = float(word_count.mean())
stat['std'] = float(word_count.std())
stat['max'] = float(word_count.max())
stat['min'] = float(word_count.min())
return pd.DataFrame.from_dict(stat, orient='index')
except:
self.__Log("exception", sys.exc_info())
raise
def DocumentLength(self, inputCorpus):
""" Calculate the length of each document in corpus
Parameters
----------
inputCorpus: sequence of input documents where each document consists of paragraphs or sentences
Returns
-------
pandas.Series of {int}
series of length of documents
"""
try:
self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
return inputCorpus.str.len()
except:
self.__Log("exception", sys.exc_info())
raise
def DocumentWordCount(self, inputCorpus):
""" Calculate the number of words in each document in corpus
Parameters
----------
inputCorpus: sequence of input documents where each document consists of paragraphs or sentences
Returns
-------
pandas.Series of {int}
series of number of words in documents
"""
try:
self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
return inputCorpus.str.split().map(lambda x: len(x))
except:
self.__Log("exception", sys.exc_info())
raise
def AverageWordLength(self, inputCorpus):
""" Calculate the average length of words in each document in corpus
Parameters
----------
inputCorpus: sequence of input documents where each document consists of paragraphs or sentences
Returns
-------
pandas.Series of {double}
series of average length of words in documents
"""
try:
self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
return inputCorpus.str.split()\\
.apply(lambda x: [len(i) for i in x])\\
.map(lambda x: np.mean(x))
except:
self.__Log("exception", sys.exc_info())
raise
def StopWordsCount(self, inputCorpus):
""" Calculate the number of stopwords in each document in corpus
Parameters
----------
inputCorpus: sequence of input documents where each document consists of paragraphs or sentences
Returns
-------
pandas.Series of {int}
series of count of stopwords in documents
"""
try:
self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
stopWordsCount = []
inputCorpus = list(inputCorpus)
for doc in inputCorpus:
count = 0
for word in doc.split():
if word in stopWords:
count += 1
stopWordsCount.append(count)
return pd.Series(stopWordsCount)
except:
self.__Log("exception", sys.exc_info())
raise
def MostCommonWords(self, inputCorpus, num_of_words=40):
""" get the most common words in corpus
Parameters
----------
inputCorpus: sequence of input documents where each document consists of paragraphs or sentences
Returns
-------
Pandas.DataFrame{string, int}
Dataframe with columns "most_common_words" and "freq"
"""
try:
self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
new = inputCorpus.str.split()
new = new.values.tolist()
corpus = [word for i in new for word in i if word not in stopWords]
counter = Counter(corpus)
most = counter.most_common()
x, y = [], []
for word, count in most[:num_of_words]:
x.append(word)
y.append(count)
return pd.DataFrame([x, y],index=['most_common_words', 'freq']).T
except:
self.__Log("exception", sys.exc_info())
raise
def NullCount(self, inputCorpus):
""" Calculate the number of null entries in corpus
Parameters
----------
inputCorpus: sequence of input documents where each document consists of paragraphs or sentences
Returns
-------
int
count of null entries in corpus
"""
try:
self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
return pd.Series(inputCorpus.isnull().sum())
except:
self.__Log("exception", sys.exc_info())
raise
def TopNgram(self, inputCorpus, ngram, num_of_words=10):
""" Get the top words from the ngrams
Parameters
----------
inputCorpus: sequence of input documents where each document consists of paragraphs or sentences
ngram: int
ngram required
num_of_words:int, optional
numbers of words to be returned
Returns
-------
Pandas.DataFrame{string, int}
Dataframe with columns "ngram_words" and "freq"
"""
try:
self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
words = []
for doc in inputCorpus:
word = [w for w in word_tokenize(doc) if (w not in stopWords)]
words.append(" ".join(word))
vec = CountVectorizer(ngram_range=(ngram, ngram)).fit(words)
bag_of_words = vec.transform(inputCorpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]
words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)[:num_of_words]
words = []
frequency = []
for word, freq in words_freq:
words.append(word)
frequency.append(freq)
return pd.DataFrame([words, frequency],index=['ngram_words', 'freq']).T
except:
self.__Log("exception", sys.exc_info())
raise
def Polarity(self, inputCorpus):
""" Get the polarity of the text
Parameters
----------
inputCorpus: sequence of input documents where each document consists of paragraphs or sentences
Returns
-------
pandas.Series {double}
series of calculated polarity of the documents
"""
try:
self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
return inputCorpus.apply(lambda x: TextBlob(x).sentiment.polarity)
except:
self.__Log("exception", sys.exc_info())
raise
def ReadabilityScore(self, inputCorpus):
""" Get the Readability Score of the text
Parameters
----------
inputCorpus: sequence of input documents where each document consists of paragraphs or sentences
Returns
-------
pandas.Series {double}
series of calculated Readability Score of the documents
"""
import textstat
try:
self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
if isinstance(inputCorpus, pd.Series):
return pd.Series([textstat.flesch_reading_ease(text) for text in inputCorpus])
else:
return [textstat.flesch_reading_ease(inputCorpus)]
except:
self.__Log("exception", sys.exc_info())
raise
def TagEntityCount(self, inputCorpus):
""" Calculate the frequency of each entity present in documents
Parameters
----------
inputCorpus: sequence of input documents where each document consists of paragraphs or sentences
Returns
-------
Pandas.DataFrame{string, int}
Dataframe with columns "entity" and "freq"
"""
def ner(text):
doc = nlp(text)
return [X.label_ for X in doc.ents]
try:
self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
nlp = spacy.load("en_core_web_sm")
ent = inputCorpus.apply(lambda x: ner(x))
ent = [x for sub in ent for x in sub]
counter = Counter(ent)
count = counter.most_common()
x, y = map(list, zip(*count))
return pd.DataFrame([x, y],index=['entity', 'freq']).T
except:
self.__Log("exception", sys.exc_info())
raise
def MostCommonTokenPerEntity(self, inputCorpus, entity="GPE"):
""" Get the frequency of most common words corresponding to the specified entity in documents
Parameters
----------
inputCorpus: sequence of input documents where each document consists of paragraphs or sentences
entity: string, optional
name of the entity corresponding to which words are counted
Returns
-------
Pandas.DataFrame{string, int}
Dataframe with columns "token" and "freq"
"""
def ner(text, ent):
doc = nlp(text)
return [X.text for X in doc.ents if X.label_ == ent]
try:
self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
nlp = spacy.load("en_core_web_sm")
gpe = inputCorpus.apply(lambda x: ner(x, entity.upper()))
gpe = [i for x in gpe for i in x]
counter = Counter(gpe)
x, y = map(list, zip(*counter.most_common(10)))
return pd.DataFrame([x, y],index=['token', 'freq']).T
except:
self.__Log("exception", sys.exc_info())
raise
def MostCommonPosTag(self, inputCorpus):
""" Get the frequency of most common POS tag present in documents
Parameters
----------
inputCorpus: sequence of input documents where each document consists of paragraphs or sentences
Returns
-------
Pandas.DataFrame{string, int}
Dataframe with columns "postag" and "freq"
"""
def pos(text):
pos = pos_tag(word_tokenize(text))
pos = list(map(list, zip(*pos)))[1]
return pos
try:
self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
tags = inputCorpus.apply(lambda x: pos(x))
tags = [x for l in tags for x in l]
counter = Counter(tags)
x, y = list(map(list, zip(*counter.most_common(7))))
return pd.DataFrame([x, y],index=['postag', 'freq']).T
except:
self.__Log("exception", sys.exc_info())
raise
def MostCommonWordsInPOSTag(self, inputCorpus, tag="NN"):
""" Get the frequency of most common words related to specified POS tag present in documents
Parameters
----------
inputCorpus: sequence of input documents where each document consists of paragraphs or sentences
tag: string, optional
POS tag corresponding to which words frequency will be calculated
Returns
-------
Pandas.DataFrame{string, int}
Dataframe with columns "words" and "freq"
"""
def get_POSTag(text, tag):
adj = []
pos = pos_tag(word_tokenize(text))
for word, tg in pos:
if tg == tag:
adj.append(word)
return adj
try:
self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
words = inputCorpus.apply(lambda x: get_POSTag(x, tag.upper()))
words = [x for l in words for x in l]
counter = Counter(words)
x = []
y = []
if len(counter):
x, y = list(map(list, zip(*counter.most_common(7))))
return pd.DataFrame([x, y],index=['words', 'freq']).T
except:
self.__Log("exception", sys.exc_info())
raise
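# Illustrative usage of the statistics above (a sketch; 'reviews' stands for any pandas
# Series of raw documents):
#   eda = ExploreTextData()
#   eda.Describe(reviews)             # word-count summary statistics
#   eda.MostCommonWords(reviews, 20)  # frequent non-stopword tokens with counts
#   eda.TopNgram(reviews, 2, 10)      # top bigrams with their frequencies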
def __preprocessData(self, inputCorpus):
""" Prepare the data for topic modelling
"""
try:
self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
corpus = []
lem = WordNetLemmatizer()
for doc in inputCorpus:
words = [w for w in word_tokenize(doc) if (w not in stopWords)]
words = [lem.lemmatize(w) for w in words if len(w) > 2]
corpus.append(words)
return corpus
except:
self.__Log("exception", sys.exc_info())
raise
<s> import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))
from .cat_type_str import cat_to_str
__version__ = "1.0"<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pandas as pd
class cat_to_str:
def __init__(self):
pass
def convert(self, x):
return pd.DataFrame(x).astype(str)
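# Illustrative usage (a sketch): cat_to_str().convert() casts categorical/object columns to
# plain strings so downstream encoders receive a uniform dtype.
#   cat_to_str().convert(pd.Series(['a', 1, None]))
#   # -> single-column DataFrame whose values are the string forms 'a', '1', 'None'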
<s> import pandas as pd
def dataGarbageValue(dataFrame,datetimeFeature):
if datetimeFeature == '' or datetimeFeature.lower() == 'na':
return 'Success',''
try:
features = datetimeFeature.split(',')
for dtfeature in features:
dataFrame[dtfeature] = pd.to_datetime(dataFrame[dtfeature],errors='coerce')
if pd.isnull(dataFrame[dtfeature]).sum() > 0:
return 'Error',dtfeature+' feature have some garbage values'
except Exception as e:
print(e)
return 'Error', 'Datetime features validation error'
return 'Success',''
<s> import os
from pathlib import Path
import pandas as pd
import numpy as np
import json
def listToStringWithDelimiter(s, vectorDBFeatureDelimitInDoc):
# length of the input sequence
sLen = len(s)
# initialize an empty string
str1 = ""
# traverse the sequence, appending the delimiter between items
for i in range(0, sLen-1):
str1 +=str(s[i])+vectorDBFeatureDelimitInDoc
str1 +=str(s[sLen-1])
# return string
return str1
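# Illustrative behaviour (a sketch): equivalent to joining the items with the delimiter.
#   listToStringWithDelimiter(['a', 1, 'b'], ';')   # -> 'a;1;b'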
def save_csv(df, fileLocation, encoding=None):
#import pdb;pdb.set_trace();
try:
parent_dir = Path(fileLocation).parent
parent_dir.mkdir(parents=True, exist_ok=True)
if encoding:
df.to_csv(fileLocation, encoding=encoding, index=False,)
else:
df.to_csv(fileLocation, index=False)
return True, ''
except Exception as e:
print(e)
return False, str(e)
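# Illustrative call (a sketch; the path is a placeholder): the parent directory is created
# on demand and the dataframe index is never written.
#   ok, err = save_csv(df, '/tmp/usecase/data/output.csv', encoding='utf-8')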
def save_csv_compressed(df, fileLocation, encoding=None):
try:
parent_dir = Path(fileLocation).parent
parent_dir.mkdir(parents=True, exist_ok=True)
if encoding:
df.to_csv(fileLocation, encoding=encoding, index=False, compression='gzip')
else:
df.to_csv(fileLocation, index=False, compression='gzip')
return True, ''
except Exception as e:
print(e)
return False, str(e)
def read_df(fileLocation,encoding=None, nrows=None):
parent_dir = Path(fileLocation).parent
if parent_dir.exists():
try:
if encoding and nrows:
df = pd.read_csv(fileLocation, encoding=encoding,nrows=nrows,encoding_errors= 'replace')
elif encoding:
df = pd.read_csv(fileLocation, encoding=encoding,encoding_errors= 'replace')
elif nrows:
df = pd.read_csv(fileLocation, nrows=nrows)
else:
df = pd.read_csv(fileLocation)
return True, df
except Exception as e:
df = pd.read_csv(fileLocation, encoding="utf-8",encoding_errors= 'replace')
print(e)
return True,df
else:
print("parent fails")
def read_df_compressed(fileLocation, encoding=None, nrows=None):
parent_dir = Path(fileLocation).parent
if parent_dir.exists():
try:
if encoding:
df = pd.read_csv(fileLocation, encoding=encoding, compression="gzip",encoding_errors= 'replace')
if nrows:
df = pd.read_csv(fileLocation, nrows=nrows, compression="gzip")
else:
df = pd.read_csv(fileLocation, encoding="utf-8", compression="gzip",encoding_errors= 'replace')
return True, df
except Exception as e:
df = pd.read_csv(fileLocation, encoding="utf-8",encoding_errors= 'replace')
print(e)
return True,df
else:
print("parent fails")
def save_chromadb(df, config_obj, fileLocation, modelFeatures):
import chromadb
#from chromadb.config import Settings
try:
parent_dir = Path(fileLocation).parent
parent_dir.mkdir(parents=True, exist_ok=True)
vectorDBFeatureDelimitInDoc = config_obj.getVectorDBFeatureDelimitInDoc()
persist_directory = os.path.dirname(os.path.abspath(fileLocation))
# client = chromadb.Client(
# Settings(
# persist_directory=persist_directory,
# chroma_db_impl="duckdb+parquet",
# )
# )
client = chromadb.PersistentClient(path=persist_directory)
# Create a new chroma collection
collection_name = os.path.basename(fileLocation).split('/')[-1]
collection_name = collection_name.replace('.csv', '')
collection_name = collection_name + 'VecDB'
collection = client.create_collection(
name=collection_name,
metadata={"hnsw:space": "cosine"}
)
features = modelFeatures.split(",")
dftxt = pd.concat([df.pop(x) for x in features], axis=1)
stepSize = 500
for i in range(0, len(df),stepSize):
start = i
end = i+ stepSize
dfembdary = df.iloc[start:end].to_numpy()
dftxtary = dftxt.iloc[start:end].to_numpy()
idxary = df.iloc[start:end].index.values
#convert to string
idxary = [str(x) for x in idxary]
dftxtary = [listToStringWithDelimiter(x.tolist(), vectorDBFeatureDelimitInDoc) for x in dftxtary]
collection.add(
embeddings=dfembdary.tolist(),
ids=idxary,
documents= dftxtary
)
client.persist()
return True, ''
except Exception as e:
return False, str(e)
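# Illustrative call (a sketch; 'conf' stands for the project config object exposing
# getVectorDBFeatureDelimitInDoc(), and the path/feature name are placeholders):
#   ok, err = save_chromadb(embeddings_df, conf, '/tmp/usecase/data/embeddings.csv', 'doc_text')
#   # creates an 'embeddingsVecDB' collection next to the csv and adds rows in batches of 500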
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
<s> import joblib
import pandas as pd
import sys
import math
import time
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
from sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.svm import SVC
from sklearn.linear_model import LinearRegression
import argparse
import json
def mltesting(modelfile,datafile,features,target):
model = joblib.load(modelfile)
ProblemName = model.__class__.__name__
if ProblemName in ['LogisticRegression','SGDClassifier','SVC','RandomForestClassifier','GaussianNB','KNeighborsClassifier','DecisionTreeClassifier','GradientBoostingClassifier','XGBClassifier','LGBMClassifier','CatBoostClassifier']:
Problemtype = 'Classification'
elif ProblemName in ['LinearRegression','Lasso','Ridge','DecisionTreeRegressor','RandomForestRegressor','GradientBoostingRegressor','XGBRegressor','LGBMRegressor','CatBoostRegressor']:
Problemtype = 'Regression'
else:
Problemtype = 'Unknown'
if Problemtype == 'Classification':
Params = model.get_params()
try:
df = pd.read_csv(datafile,encoding='utf-8',skipinitialspace = True)
if ProblemName == 'LogisticRegression' or ProblemName == 'DecisionTreeClassifier' or ProblemName == 'RandomForestClassifier' or ProblemName == 'GaussianNB' or ProblemName == 'KNeighborsClassifier' or ProblemName == 'GradientBoostingClassifier' or ProblemName == 'SVC':
features = model.feature_names_in_
elif ProblemName == 'XGBClassifier':
features = model.get_booster().feature_names
elif ProblemName == 'LGBMClassifier':
features = model.feature_name_
elif ProblemName == 'CatBoostClassifier':
features = model.feature_names_
modelfeatures = features
dfp = df[modelfeatures]
tar = target
target = df[tar]
predic = model.predict(dfp)
output = {}
matrixconfusion = pd.DataFrame(confusion_matrix(target,predic))
matrixconfusion = matrixconfusion.to_json(orient='index')
classificationreport = pd.DataFrame(classification_report(target,predic,output_dict=True)).transpose()
classificationreport = round(classificationreport,2)
classificationreport = classificationreport.to_json(orient='index')
output["Precision"] = "%.2f" % precision_score(target, predic,average='weighted')
output["Recall"] = "%.2f" % recall_score(target, predic,average='weighted')
output["Accuracy"] = "%.2f" % accuracy_score(target, predic)
output["ProblemName"] = ProblemName
output["Status"] = "Success"
output["Params"] = Params
output["Problemtype"] = Problemtype
output["Confusionmatrix"] = matrixconfusion
output["classificationreport"] = classificationreport
# import statistics
# timearray = []
# for i in range(0,5):
# start = time.time()
# predic1 = model.predict(dfp.head(1))
# end = time.time()
# timetaken = (round((end - start) * 1000,2),'Seconds')
# timearray.append(timetaken)
# print(timearray)
start = time.time()
for i in range(0,5):
predic1 = model.predict(dfp.head(1))
end = time.time()
timetaken = (round((end - start) * 1000,2),'Milliseconds')
# print(timetaken)
start1 = time.time()
for i in range(0,5):
predic2 = model.predict(dfp.head(10))
end1 = time.time()
timetaken1 = (round((end1 - start1) * 1000,2) ,'Milliseconds')
# print(timetaken1)
start2 = time.time()
for i in range(0,5):
predic3 = model.predict(dfp.head(100))
end2 = time.time()
timetaken2 = (round((end2 - start2) * 1000,2) ,'Milliseconds')
# print(timetaken2)
output["onerecord"] = timetaken
output["tenrecords"] = timetaken1
output["hundrecords"] = timetaken2
print(json.dumps(output))
except Exception as e:
output = {}
output['Problemtype']='Classification'
output['Status']= "Fail"
output["ProblemName"] = ProblemName
output["Msg"] = 'Detected Model : {} \\\\n Problem Type : Classification \\\\n Error : {}'.format(ProblemName, str(e).replace('"','//"').replace('\\n', '\\\\n'))
print(output["Msg"])
print(json.dumps(output))
elif Problemtype == 'Regression':
Params = model.get_params()
try:
df = pd.read_csv(datafile,encoding='utf-8',skipinitialspace = True)
if ProblemName == 'LinearRegression' or ProblemName == 'Lasso' or ProblemName == 'Ridge' or ProblemName == 'DecisionTreeRegressor' or ProblemName == 'RandomForestRegressor' or ProblemName == 'GaussianNB' or ProblemName == 'KNeighborsRegressor' or ProblemName == 'GradientBoostingRegressor':
features = model.feature_names_in_
elif ProblemName == 'XGBRegressor':
features = model.get_booster().feature_names
elif ProblemName == 'LGBMRegressor':
features = model.feature_name_
elif ProblemName == 'CatBoostRegressor':
features = model.feature_names_
modelfeatures = features
dfp = df[modelfeatures]
tar = target
target = df[tar]
predict = model.predict(dfp)
mse = mean_squared_error(target, predict)
mae = mean_absolute_error(target, predict)
rmse = math.sqrt(mse)
r2 = r2_score(target,predict,multioutput='variance_weighted')
output = {}
output["MSE"] = "%.2f" % mean_squared_error(target, predict)
output["MAE"] = "%.2f" % mean_absolute_error(target, predict)
output["RMSE"] = "%.2f" % math.sqrt(mse)
output["R2"] = "%.2f" %r2_score(target,predict,multioutput='variance_weighted')
output["ProblemName"] = ProblemName
output["Problemtype"] = Problemtype
output["Params"] = Params
output['Status']='Success'
start = time.time()
predic1 = model.predict(dfp.head(1))
end = time.time()
timetaken = (round((end - start) * 1000,2) ,'Milliseconds')
# print(timetaken)
start1 = time.time()
predic2 = model.predict(dfp.head(10))
end1 = time.time()
timetaken1 = (round((end1 - start1) * 1000,2),'Milliseconds')
# print(timetaken1)
start2 = time.time()
predic3 = model.predict(dfp.head(100))
end2 = time.time()
timetaken2 = (round((end2 - start2) * 1000,2) ,'Milliseconds')
# print(timetaken2)
output["onerecord"] = timetaken
output["tenrecords"] = timetaken1
output["hundrecords"] = timetaken2
print(json.dumps(output))
except Exception as e:
output = {}
output['Problemtype']='Regression'
output['Status']='Fail'
output["ProblemName"] = ProblemName
output["Msg"] = 'Detected Model : {} \\\\n Problem Type : Regression \\\\n Error : {}'.format(ProblemName, str(e).replace('"','//"').replace('\\n', '\\\\n'))
print(json.dumps(output))
else:
output = {}
output['Problemtype']='Unknown'
output['Status']='Fail'
output['Params'] = ''
output["ProblemName"] = ProblemName
output["Msg"] = 'Detected Model : {} \\\\n Error : {}'.format(ProblemName, 'Model not supported')
print(json.dumps(output))
return(json.dumps(output))
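# Illustrative invocation (a sketch; paths and column names are placeholders): for supported
# estimators the feature list is re-read from the model itself before scoring.
#   result_json = mltesting('model.pkl', 'holdout.csv', 'f1,f2,f3', 'label')
#   # prints and returns a JSON string with metrics, params and per-batch latency figures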
def baseline_testing(modelFile,csvFile,features,target):
features = [x.strip() for x in features.split(',')]
return mltesting(modelFile,csvFile,features,target)
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from importlib.metadata import version
import sys
import os
def requirementfile(deploy_path,model,textFeatures,learner_type):
print('Generating requirements for model:', model)
modules = ['pandas','numpy','alibi','matplotlib','joblib','shap','ipython','category_encoders','scikit-learn','word2number','flask_restful','evidently','Flask-Cors']
requires = ''
for mod in modules:
requires += f"{mod}=={version(mod)}\\n"
if len(textFeatures) > 0:
tmodules = ['spacy','nltk','textblob','demoji','beautifulsoup4','text-unidecode','pyspellchecker','contractions','protobuf']
for mod in tmodules:
requires += f"{mod}=={version(mod)}\\n"
if model == 'Extreme Gradient Boosting (XGBoost)':
mmodules = ['xgboost']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model == 'Light Gradient Boosting (LightGBM)':
mmodules = ['lightgbm']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model == 'Categorical Boosting (CatBoost)':
mmodules = ['catboost']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model.lower() == 'arima':
mmodules = ['pmdarima']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model.lower() == 'fbprophet':
mmodules = ['prophet']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model.lower() == 'lstm' or model.lower() == 'mlp' or learner_type =='DL':
mmodules = ['tensorflow']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model.lower() in ['cox', 'kaplanmeierfitter']: #bug 12833
mmodules = ['lifelines']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model.lower() == 'sentencetransformer': #bug 12833
mmodules = ['sentence_transformers']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
filename = os.path.join(deploy_path,'requirements.txt')
f = open(filename, "wb")
f.write(str(requires).encode('utf8'))
f.close()
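# Illustrative call (a sketch; the deploy path and learner type are placeholders): the base
# modules are always pinned, with extras appended per model family and text usage.
#   requirementfile('/tmp/usecase/publish', 'Extreme Gradient Boosting (XGBoost)', [], 'ML')
#   # writes <deploy_path>/requirements.txt including a pinned xgboost entry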
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import platform
import json
import shutil
import logging
import sys
from AionConfigManager import AionConfigManager
import joblib
class edgeformats:
def __init__(self,deploy_path):
self.deploy_path = deploy_path
self.edge_deploy_path = os.path.join(deploy_path,"edge")
os.mkdir(self.edge_deploy_path)
def converttoedgedeployment(self,saved_model,edge_format,xtrain,model_type,iterName,iterVersion,features,profiled_data_file):
if edge_format == 'onnx':
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType
xtrain = xtrain[features]
initial_type = [('float_input', FloatTensorType([None, xtrain.shape[1]]))]
filename = os.path.join(self.deploy_path,saved_model)
loaded_model = joblib.load(filename)
onx = convert_sklearn(loaded_model, initial_types=initial_type)
onnx_filename = os.path.join(self.edge_deploy_path, model_type + '_' + iterName + '_' + iterVersion + '.onnx')
with open(onnx_filename, "wb") as f:
f.write(onx.SerializeToString())
self.createedgeruntimeFile(onnx_filename,profiled_data_file,features)
def createedgeruntimeFile(self,onnx_filename,datafilepath,features):
runtimefilecontent = ''
runtimefilecontent += 'import pandas'
runtimefilecontent += '\\n'
runtimefilecontent += 'import numpy'
runtimefilecontent += '\\n'
runtimefilecontent += 'import sys'
runtimefilecontent += '\\n'
runtimefilecontent += 'import onnxruntime as rt'
runtimefilecontent += '\\n'
runtimefilecontent += 'def onnx_runtime_validation():'
runtimefilecontent += '\\n'
runtimefilecontent += ' modelfile = r"'+str(onnx_filename)+'"'
runtimefilecontent += '\\n'
runtimefilecontent += ' datafile = r"'+str(datafilepath)+'"'
runtimefilecontent += '\\n'
runtimefilecontent += ' dataframe = pandas.read_csv(datafile)'
runtimefilecontent += '\\n'
runtimefilecontent += ' dataframe = dataframe['+str(features)+']'
runtimefilecontent += '\\n'
runtimefilecontent += ' df = dataframe.head(8)'
runtimefilecontent += '\\n'
runtimefilecontent += ' dataset = df.values'
runtimefilecontent += '\\n'
runtimefilecontent += ' sess = rt.InferenceSession(modelfile)'
runtimefilecontent += '\\n'
runtimefilecontent += ' input_name = sess.get_inputs()[0].name'
runtimefilecontent += '\\n'
runtimefilecontent += ' label_name = sess.get_outputs()[0].name'
runtimefilecontent += '\\n'
runtimefilecontent += ' inputsize=sess.get_inputs()[0].shape'
runtimefilecontent += '\\n'
runtimefilecontent += ' XYZ = dataset[:,0:inputsize[1]].astype(float)'
runtimefilecontent += '\\n'
runtimefilecontent += ' pred_onx = sess.run([label_name], {input_name: XYZ.astype(numpy.float32)[0:8]})[0]'
runtimefilecontent += '\\n'
runtimefilecontent += ' df[\\'predictions\\'] = pred_onx'
runtimefilecontent += '\\n'
runtimefilecontent += ' result = df.to_json(orient="records")'
runtimefilecontent += '\\n'
runtimefilecontent += ' return(result)'
runtimefilecontent += '\\n'
runtimefilecontent += 'if __name__ == "__main__":'
runtimefilecontent += '\\n'
runtimefilecontent += ' output = onnx_runtime_validation()'
runtimefilecontent += '\\n'
runtimefilecontent += ' print("predictions:",output)'
filename = os.path.join(self.edge_deploy_path,'onnxvalidation.py')
f = open(filename, "w")
f.write(str(runtimefilecontent))
f.close()
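# Illustrative usage (a sketch; the arguments are placeholders matching the signature above):
#   edge = edgeformats('/tmp/usecase/publish')
#   edge.converttoedgedeployment('model.pkl', 'onnx', xtrain, 'classification',
#                                'usecase1', '1', feature_list, 'profiled.csv')
#   # writes classification_usecase1_1.onnx plus onnxvalidation.py under publish/edge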
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import platform
import json
import shutil
import logging
class outputFormatter:
def __init__(self):
self.log = logging.getLogger('eion')
self.log.info('========> Inside Output Formatter')
def crate_output_format_file(self,deploy_path,learner_type,modelType,model,output_label,threshold,trained_data_file,dictDiffCount,targetFeature,features,datetimeFeature):
self.output_formatfile = 'import json'
self.output_formatfile += '\\n'
self.output_formatfile += 'import numpy as np'
self.output_formatfile += '\\n'
self.output_formatfile += 'import pandas as pd'
self.output_formatfile += '\\n'
self.output_formatfile += 'import os'
self.output_formatfile += '\\n'
self.output_formatfile += 'from pathlib import Path'
self.output_formatfile += '\\n'
if((model.lower() in ['autoencoder','dbscan']) and modelType.lower()=="anomaly_detection"):
self.output_formatfile += 'from script.aion_granularity import aion_gettimegranularity'
self.output_formatfile += '\\n'
self.output_formatfile += 'class output_format(object):'
self.output_formatfile += '\\n'
if(model == 'VAR'):
self.output_formatfile += ' def invertTransformation(self,predictions):'
self.output_formatfile += '\\n'
self.output_formatfile += ' datasetdf = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)),"..","data","trainingdata.csv"))'
self.output_formatfile += '\\n'
self.output_formatfile += ' dictDiffCount = '+str(dictDiffCount)
self.output_formatfile += '\\n'
self.output_formatfile += ' targetFeature = "'+str(targetFeature)+'"'
self.output_formatfile += '\\n'
self.output_formatfile += ' columns = targetFeature.split(",")'
self.output_formatfile += '\\n'
self.output_formatfile += ' pred = pd.DataFrame(index=range(0,len(predictions)),columns=columns)'
self.output_formatfile += '\\n'
self.output_formatfile += ' for j in range(0,len(columns)):'
self.output_formatfile += '\\n'
self.output_formatfile += ' for i in range(0, len(predictions)):'
self.output_formatfile += '\\n'
self.output_formatfile += ' pred.iloc[i][j] = round(predictions[i][j],2)'
self.output_formatfile += '\\n'
self.output_formatfile += ' prediction = pred'
self.output_formatfile += '\\n'
self.output_formatfile += ' for col in columns:'
self.output_formatfile += '\\n'
self.output_formatfile += ' if col in dictDiffCount:'
self.output_formatfile += '\\n'
self.output_formatfile += ' if dictDiffCount[col]==2:'
self.output_formatfile += '\\n'
self.output_formatfile += ' prediction[col] = (datasetdf[col].iloc[-1]-datasetdf[col].iloc[-2]) + prediction[col].cumsum()'
self.output_formatfile += '\\n'
self.output_formatfile += ' prediction[col] = datasetdf[col].iloc[-1] + prediction[col].cumsum()'
self.output_formatfile += '\\n'
self.output_formatfile += ' prediction = pred'
self.output_formatfile += '\\n'
self.output_formatfile += ' return(prediction)'
self.output_formatfile += '\\n'
self.log.info("op:modelType: \\n"+str(modelType))
if((model.lower() in ['autoencoder','dbscan']) and modelType.lower()=="anomaly_detection"):
# if modelType == 'anomaly_detection':
self.output_formatfile += ' def find_point_subsequence_anomalies(self,datetime_column,dataframe=None):'
self.output_formatfile += '\\n'
self.output_formatfile += ' try:'
self.output_formatfile += '\\n'
self.output_formatfile += ' dataframe[datetime_column] = pd.to_datetime(dataframe[datetime_column]) '
self.output_formatfile += '\\n'
self.output_formatfile += ' aion_gettimegranularity_obj=aion_gettimegranularity(dataframe,datetime_column) '
self.output_formatfile += '\\n'
self.output_formatfile += ' anomaly_info_df=aion_gettimegranularity_obj.get_granularity() '
self.output_formatfile += '\\n'
self.output_formatfile += ' except Exception as e:'
self.output_formatfile += '\\n'
self.output_formatfile += ' print(f"find_point_subsequence_anomalies,: aion_gettimegranularity err msg:{e} ")\\n'
self.output_formatfile += ' return anomaly_info_df'
self.output_formatfile += '\\n'
if((model.lower() in ['autoencoder','dbscan']) and modelType.lower()=="anomaly_detection"):
if (datetimeFeature!='' and datetimeFeature!='NA'):
self.output_formatfile += ' def apply_output_format(self,df,modeloutput,datetimeFeature):'
self.output_formatfile += '\\n'
else:
self.output_formatfile += ' def apply_output_format(self,df,modeloutput):'
self.output_formatfile += '\\n'
else:
self.output_formatfile += ' def apply_output_format(self,df,modeloutput):'
self.output_formatfile += '\\n'
if modelType.lower() == 'classification':
self.output_formatfile += '            modeloutput = round(modeloutput,2)'
self.output_formatfile += '\\n'
if(learner_type == 'ImageClassification'):
if(str(output_label) != '{}'):
inv_mapping_dict = {v: k for k, v in output_label.items()}
self.output_formatfile += ' le_dict = '+ str(inv_mapping_dict)
self.output_formatfile += '\\n'
self.output_formatfile += ' predictions = []'
self.output_formatfile += '\\n'
self.output_formatfile += ' for x in modeloutput:'
self.output_formatfile += '\\n'
self.output_formatfile += ' x = le_dict[x]'
self.output_formatfile += '\\n'
self.output_formatfile += ' predictions.append(x)'
self.output_formatfile += '\\n'
else:
self.output_formatfile += ' predictions=modeloutput'
self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'prediction\\'] = predictions'
self.output_formatfile += '\\n'
self.output_formatfile += ' outputjson = df.to_json(orient=\\'records\\')'
self.output_formatfile += '\\n'
self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}'
self.output_formatfile += '\\n'
elif(learner_type == 'Text Similarity'):
self.output_formatfile += ' df[\\'prediction\\'] = np.where(modeloutput > '+str(threshold)+',1,0)'
self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'probability\\'] = modeloutput'
self.output_formatfile += '\\n'
self.output_formatfile += ' outputjson = df.to_json(orient=\\'records\\',double_precision=2)'
self.output_formatfile += '\\n'
self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}'
self.output_formatfile += '\\n'
elif(learner_type == 'TS'):
if(model == 'VAR'):
self.output_formatfile += ' modeloutput = self.invertTransformation(modeloutput)'
self.output_formatfile += '\\n'
self.output_formatfile += ' modeloutput = modeloutput.to_json(orient=\\'records\\',double_precision=2)'
self.output_formatfile += '\\n'
self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(modeloutput)}'
elif(model.lower() == 'fbprophet'):
self.output_formatfile += ' modeloutput = modeloutput.to_json(orient=\\'records\\')'
self.output_formatfile += '\\n'
self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(modeloutput)}'
elif((model.lower() == 'lstm' or model.lower() == 'mlp') and len(features) >= 1):
self.output_formatfile += ' modeloutput = modeloutput.round(2)\\n'
self.output_formatfile += ' modeloutput = modeloutput.to_json(orient=\\'records\\')\\n'
self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(modeloutput)}\\n'
else:
self.output_formatfile += ' modeloutput = modeloutput.round(2)'
self.output_formatfile += '\\n'
self.output_formatfile += ' modeloutput = json.dumps(modeloutput.tolist())'
self.output_formatfile += '\\n'
self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":eval(modeloutput)}'
self.output_formatfile += '\\n'
elif(learner_type in ['RecommenderSystem','similarityIdentification','contextualSearch']):
self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'prediction\\'] = modeloutput'
self.output_formatfile += '\\n'
self.output_formatfile += ' outputjson = df.to_json(orient=\\'records\\',double_precision=2)'
self.output_formatfile += '\\n'
self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}'
self.output_formatfile += '\\n'
else:
if(modelType == 'Classification' or modelType == 'TLClassification' or modelType == 'anomaly_detection'):
if(modelType == 'Classification' or modelType == 'TLClassification' or modelType == 'anomaly_detection'):
if(str(output_label) != '{}'):
inv_mapping_dict = {v: k for k, v in output_label.items()}
self.output_formatfile += ' le_dict = '+ str(inv_mapping_dict)
self.output_formatfile += '\\n'
'''
if(model in ['SGDClassifier']):
self.output_formatfile += ' modeloutput = modeloutput.replace({"predict_class": le_dict})'
else:
self.output_formatfile += ' modeloutput = modeloutput.rename(columns=le_dict)'
'''
if modelType != 'anomaly_detection':
self.output_formatfile += ' modeloutput = modeloutput.rename(columns=le_dict)'
self.output_formatfile += '\\n'
if(threshold != -1):
'''
if(model in ['SGDClassifier']):
self.output_formatfile += ' df[\\'prediction\\'] = np.where(modeloutput[\\'probability\\'] > '+str(threshold)+',1,0)'
self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'probability\\'] = modeloutput[\\'probability\\']'
self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'remarks\\'] = ""'
self.output_formatfile += '\\n'
else:
self.output_formatfile += ' predictedData = modeloutput.iloc[:,1]'
self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'prediction\\'] = np.where(predictedData > '+str(threshold)+',modeloutput.columns[1],modeloutput.columns[0])'
self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'probability\\'] = np.where(df[\\'prediction\\'] == modeloutput.columns[1],modeloutput.iloc[:,1],modeloutput.iloc[:,0])'
self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'remarks\\'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)'
self.output_formatfile += '\\n'
'''
self.output_formatfile += ' predictedData = modeloutput.iloc[:,1]'
self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'prediction\\'] = np.where(predictedData > '+str(threshold)+',modeloutput.columns[1],modeloutput.columns[0])'
self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'probability\\'] = np.where(df[\\'prediction\\'] == modeloutput.columns[1],modeloutput.iloc[:,1],modeloutput.iloc[:,0])'
self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'remarks\\'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)'
self.output_formatfile += '\\n'
else:
'''
if(model in ['SGDClassifier']):
self.output_formatfile += ' df[\\'prediction\\'] = modeloutput[\\'predict_class\\']'
self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'probability\\'] = ""'
self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'remarks\\'] = "NA"'
self.output_formatfile += '\\n'
else:
self.output_formatfile += ' df[\\'prediction\\'] = modeloutput.idxmax(axis=1)'
self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'probability\\'] = modeloutput.max(axis=1)'
self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'remarks\\'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)'
self.output_formatfile += '\\n'
'''
if modelType == 'anomaly_detection':
# if (model.lower()=='autoencoder'):
if model.lower() in ['autoencoder']:
if (datetimeFeature != '' and datetimeFeature.lower() != 'na'):
self.output_formatfile += ' df[modeloutput.columns] = modeloutput\\n'
self.output_formatfile += ' anomaly_df=df[df[\\'anomaly\\'] == True]\\n'
self.output_formatfile += ' anomaly_prediction_df=self.find_point_subsequence_anomalies(datetimeFeature,anomaly_df)\\n'
self.output_formatfile += ' new_dir = str(Path(__file__).parent.parent/\\'data\\')\\n'
self.output_formatfile += ' anomaly_prediction_df.to_csv(f"{new_dir}/anomaly_data.csv")\\n'
self.output_formatfile += ' try:\\n'
self.output_formatfile += ' anomaly_prediction_df[datetimeFeature]=pd.to_datetime(anomaly_prediction_df[datetimeFeature])\\n'
self.output_formatfile += ' df[datetimeFeature]=pd.to_datetime(df[datetimeFeature])\\n'
self.output_formatfile += ' anomaly_prediction_df.drop("Time_diff",axis=1,inplace=True)\\n'
self.output_formatfile += ' except:\\n'
self.output_formatfile += ' pass\\n'
self.output_formatfile += ' try:\\n'
self.output_formatfile += ' df_out = pd.merge(df, anomaly_prediction_df, on=df.columns.values.tolist(), how=\\'left\\')\\n'
self.output_formatfile += ' df_out[\\'anomaly\\'].replace([\\'None\\', \\'NaN\\', np.nan], "Normal", inplace=True)\\n'
self.output_formatfile += ' df_out[\\'anomalyType\\'].replace([\\'None\\', \\'NaN\\', np.nan], "Normal", inplace=True)\\n'
self.output_formatfile += ' df_out.to_csv(f"{new_dir}/overall_ad_output.csv") \\n'
self.output_formatfile += ' df_out[datetimeFeature]=df_out[datetimeFeature].astype(str) \\n'
self.output_formatfile += ' df_out.drop("time_diff",axis=1,inplace=True)\\n'
self.output_formatfile += ' except Exception as e:\\n'
self.output_formatfile += ' print("anomaly data updated issue",e)\\n'
self.output_formatfile += ' df_out[datetimeFeature]=df_out[datetimeFeature].astype(str)\\n'
self.output_formatfile += ' df=df_out \\n'
else:
self.output_formatfile += ' df[modeloutput.columns] = modeloutput\\n'
elif (model.lower()=='dbscan'):
if (datetimeFeature != '' and datetimeFeature.lower() != 'na'):
self.output_formatfile += ' df[\\'anomaly\\'] = modeloutput[\\'cluster\\']== -1\\n'
self.output_formatfile += ' anomaly_df=df[df[\\'anomaly\\'] == True]\\n'
self.output_formatfile += ' anomaly_prediction_df=self.find_point_subsequence_anomalies(datetimeFeature,anomaly_df)\\n'
self.output_formatfile += ' new_dir = str(Path(__file__).parent.parent/\\'data\\')\\n'
self.output_formatfile += ' try:\\n'
self.output_formatfile += ' anomaly_prediction_df[datetimeFeature]=pd.to_datetime(anomaly_prediction_df[datetimeFeature])\\n'
self.output_formatfile += ' df[datetimeFeature]=pd.to_datetime(df[datetimeFeature])\\n'
self.output_formatfile += ' except:\\n'
self.output_formatfile += ' pass\\n'
self.output_formatfile += ' try:\\n'
self.output_formatfile += ' df_out = pd.merge(df, anomaly_prediction_df, on=df.columns.values.tolist(), how=\\'left\\')\\n'
self.output_formatfile += ' df_out[\\'anomaly\\'].replace([\\'None\\', \\'NaN\\', np.nan], "Normal", inplace=True)\\n'
self.output_formatfile += ' df_out.to_csv(f"{new_dir}/overall_ad_output.csv") \\n'
self.output_formatfile += ' df_out[datetimeFeature]=df_out[datetimeFeature].astype(str)\\n'
self.output_formatfile += ' except Exception as e:\\n'
self.output_formatfile += ' print("anomaly data updated.")\\n'
self.output_formatfile += ' df_out[datetimeFeature]=df_out[datetimeFeature].astype(str)\\n'
self.output_formatfile += ' df=df_out \\n'
else:
self.output_formatfile += ' df[\\'anomaly\\'] = modeloutput[\\'cluster\\']== -1\\n'
self.output_formatfile += ' df.sort_values(by=[\\'anomaly\\'], ascending=False, inplace=True)\\n'
else:
self.output_formatfile += ' df[\\'prediction\\'] = modeloutput'
self.output_formatfile += '\\n'
else:
self.output_formatfile += ' df[\\'prediction\\'] = modeloutput.idxmax(axis=1)'
self.output_formatfile += '\\n'
if learner_type != 'DL':
self.output_formatfile += ' df[\\'probability\\'] = modeloutput.max(axis=1).round(2)'
self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'remarks\\'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)'
self.output_formatfile += '\\n'
else:
if model == 'COX':
self.output_formatfile += '\\n'
self.output_formatfile += ' modeloutput[0] = modeloutput[0].round(2)'
self.output_formatfile += '\\n'
#self.output_formatfile += ' modeloutput = modeloutput[0].to_json(orient=\\'records\\',double_precision=2)'
#self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'prediction\\'] = modeloutput'
self.output_formatfile += '\\n'
else:
self.output_formatfile += ' df[\\'prediction\\'] = modeloutput[0]'
if(learner_type == 'objectDetection'):
self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'prediction\\'] = df[\\'prediction\\']'
else:
self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'prediction\\'] = df[\\'prediction\\'].round(2)'
self.output_formatfile += '\\n'
self.output_formatfile += ' outputjson = df.to_json(orient=\\'records\\',double_precision=2)'
self.output_formatfile += '\\n'
self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}'
self.output_formatfile += '\\n'
self.output_formatfile += ' return(json.dumps(outputjson))'
filename = os.path.join(deploy_path,'script','output_format.py')
#print(deploy_path)
f = open(filename, "wb")
self.log.info('-------> Output Mapping File Location :'+filename)
f.write(str(self.output_formatfile).encode('utf8'))
f.close()
<s> #task 11190: Item based Recommender system---Usnish
import os
def generate_recommender_code(deployPath):
code = """
import pandas as pd
import numpy as np
import os
ITEMID = 'itemId'
DATA_FOLDER = 'data'
USER_ITEM_MATRIX = 'user_item_matrix.csv'
ITEM_SIMILARITY_MATRIX = 'item_similarity_matrix.csv'
RATING = 'rating'
SIMILARITY_SCORE = 'similarity_score'
class collaborative_filter(object):
def __init__(self):
self.matrix = pd.read_csv(os.path.join(os.path.dirname(__file__), '..', DATA_FOLDER, USER_ITEM_MATRIX),index_col=0)
self.matrix.index.name = ITEMID
self.item_similarity_cosine = pd.read_csv(os.path.join(os.path.dirname(__file__), '..', DATA_FOLDER, ITEM_SIMILARITY_MATRIX))
self.item_similarity_cosine.index.name = ITEMID
self.item_similarity_cosine.columns.name = ITEMID
def item_based_rec(self,picked_userid, number_of_recommendations,number_of_similar_items=5):
import operator
if not isinstance(picked_userid,str):
picked_userid = str(picked_userid)
if picked_userid not in self.matrix.columns:
raise KeyError("UserID Does Not Exist")
# Movies that the target user has not watched
try:
picked_userid_unwatched = pd.DataFrame(self.matrix[picked_userid].isna()).reset_index()
picked_userid_unwatched = picked_userid_unwatched[picked_userid_unwatched[picked_userid] == True][ITEMID].values.tolist()
# Movies that the target user has watched
picked_userid_watched = pd.DataFrame(self.matrix[picked_userid].dropna(axis=0, how='all') \\
.sort_values(ascending=False)) \\
.reset_index() \\
.rename(columns={picked_userid: 'rating'})
# Dictionary to save the unwatched movie and predicted rating pair
rating_prediction = {}
# Loop through unwatched movies
for picked_movie in picked_userid_unwatched:
if not isinstance(picked_movie,str):
picked_movie = str(picked_movie)
# Calculate the similarity score of the picked movie with other movies
try:
picked_movie_similarity_score = self.item_similarity_cosine[[picked_movie]].reset_index().rename(
columns={picked_movie: SIMILARITY_SCORE})
# Rank the similarities between the picked user watched movie and the picked unwatched movie.
picked_userid_watched_similarity = pd.merge(left=picked_userid_watched,
right=picked_movie_similarity_score,
on=ITEMID,
how='inner') \\
.sort_values(SIMILARITY_SCORE, ascending=False)[
:number_of_similar_items]
# Calculate the predicted rating using weighted average of similarity scores and the ratings from picked user
try:
predicted_rating = round(np.average(picked_userid_watched_similarity[RATING],weights=picked_userid_watched_similarity[SIMILARITY_SCORE]), 6)
except Exception as e:
predicted_rating = 0
# Save the predicted rating in the dictionary
rating_prediction[picked_movie] = predicted_rating
except Exception as e:
rating_prediction[picked_movie] = 0
# Return the top recommended movies
return sorted(rating_prediction.items(), key=operator.itemgetter(1), reverse=True)[:number_of_recommendations]
except Exception as e:
print(e)
raise KeyError(str(e))
def predict(self,X):
predictions = []
for index,row in X.iterrows():
score = self.item_based_rec(int(row["uid"]),int(row["numberOfRecommendation"]))
df = pd.DataFrame(score,columns=['ItemId','Ratings'])
predictions.append(df)
return predictions"""
filename = os.path.join(deployPath, 'script', 'item_recommendation.py')
# print(deploy_path)
f = open(filename, "wb")
f.write(str(code).encode('utf8'))
f.close()
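# Usage sketch (hypothetical path; assumes the deploy folder already contains the
# 'script' sub-directory created by the rest of the deployment pipeline):
#
#   generate_recommender_code('/tmp/aion_deploy')
#   # -> writes /tmp/aion_deploy/script/item_recommendation.py with the
#   #    collaborative_filter class consumed by the generated prediction script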
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
from pathlib import Path
from AION.prediction_package.imports import importModule
from AION.prediction_package import utility
from AION.prediction_package.utility import TAB_CHAR
from importlib.metadata import version
"""
This file provides the functionality that is common to most of the
problem-type deployments.
"""
def main_code():
return """
class predict():
def __init__(self):
self.profiler = inputprofiler()
self.selector = selector()
self.trainer = trainer()
self.formatter = output_format()
def run(self, data):
try:
df = self._parse_data(data)
raw_df = df.copy()
df = self.profiler.run(df)
df = self.selector.run(df)
df = self.trainer.run(df)
output = self.formatter.run(raw_df, df)
print("predictions:",output)
return (output)
except Exception as e:
output = {"status":"FAIL","message":str(e).strip('"')}
print("predictions:",json.dumps(output))
return (json.dumps(output))
def _parse_data(self, data):
file_path = Path(data)
if file_path.suffix == ".tsv":
df = pd.read_csv(data,encoding='utf-8',sep='\\\\t',skipinitialspace = True,na_values=['-','?'])
elif file_path.suffix in [".csv", ".dat"]:
df=pd.read_csv(data,encoding='utf-8',skipinitialspace = True,na_values=['-','?'])
elif file_path.suffix in [".gz"] and file_path.stem.endswith('.csv'):
df=pd.read_csv(data,encoding='utf-8',skipinitialspace = True,na_values=['-','?'])
elif file_path.suffix == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
df = pd.json_normalize(jsonData)
else:
jsonData = json.loads(data)
df = pd.json_normalize(jsonData)
return df
import sys
if __name__ == "__main__":
output = predict().run(sys.argv[1])
"""
def profiler_code(params, indent=0):
"""
This will create the profiler file based on the config file.
The profiler is written as a separate file because it is also required for input-drift detection.
"""
imported_modules = [
{'module': 'json', 'mod_from': None, 'mod_as': None},
{'module': 'scipy', 'mod_from': None, 'mod_as': None},
{'module': 'joblib', 'mod_from': None, 'mod_as': None},
{'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}
]
importer = importModule()
utility.import_modules(importer, imported_modules)
code = """
class inputprofiler():
"""
init_code = """
def __init__(self):
"""
if params.get('text_features'):
imported_modules.append({'module':'importlib.util'})
init_code += """
# preprocessing
preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl'
if not preprocess_path.exists():
raise ValueError(f'Preprocess model file not found: {preprocess_path}')
self.profiler = joblib.load(preprocess_path)
"""
run_code = """
def run(self,df):
df = df.replace(r'^\\s*$', np.NaN, regex=True)
"""
if params.get('input_features_type'):
imported_modules.append({'module':'dtype','mod_from':'numpy'})
run_code += f"""
df = df.astype({params.get('input_features_type')})
"""
if params.get('word2num_features'):
imported_modules.append({'module':'w2n','mod_from':'word2number'})
run_code += f"""
def s2n(value):
try:
x=eval(value)
return x
except:
try:
return w2n.word_to_num(value)
except:
return np.nan
df[{params['word2num_features']}] = df[{params['word2num_features']}].apply(lambda x: s2n(x))"""
if params.get('unpreprocessed_columns'):
run_code += f"""
unpreprocessed_data = df['{params['unpreprocessed_columns'][0]}']
df.drop(['{params['unpreprocessed_columns'][0]}'], axis=1,inplace=True)
"""
if params.get('force_numeric_conv'):
run_code += f"""
df[{params['force_numeric_conv']}] = df[{params['force_numeric_conv']}].apply(pd.to_numeric,errors='coerce')"""
if params.get('conversion_method','').lower() == 'glove':
code_text, modules = __profiler_glove_code(params)
imported_modules.extend( modules)
init_code += code_text
elif params.get('conversion_method','').lower() == 'fasttext':
init_code += __profiler_fasttext_code(params)
run_code += __profiler_main_code(params)
if params.get('unpreprocessed_columns'):
run_code += f"""
df['{params.get('unpreprocessed_columns')[0]}'] = unpreprocessed_data
"""
utility.import_modules(importer, imported_modules)
import_code = importer.getCode()
return import_code + code + init_code + run_code
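# Minimal sketch of profiler_code (hypothetical params; only 'output_features' is
# strictly required when no text/word2num/unpreprocessed handling is configured):
#
#   params = {'output_features': ['age', 'salary']}   # hypothetical feature names
#   src = profiler_code(params)
#   # 'src' is the source text of the inputprofiler class; the caller writes it to a file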
def __profiler_glove_code(params, indent=2):
modules = []
modules.append({'module':'load_pretrained','mod_from':'text.Embedding'})
modules.append({'module':'TextProcessing','mod_from':'text'})
code = """
model_path = TextProcessing.checkAndDownloadPretrainedModel('glove')
embed_size, pretrained_model = load_pretrained(model_path)
self.profiler.set_params(text_process__vectorizer__external_model = pretrained_model)
"""
return code.replace('\\n', '\\n'+(indent * TAB_CHAR)), modules
def __profiler_fasttext_code(params, indent=2):
code = """
def get_pretrained_model_path():
try:
from AION.appbe.dataPath import DATA_DIR
modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing'
except:
modelsPath = Path('aion')/'PreTrainedModels'/'TextProcessing'
if not modelsPath.exists():
modelsPath.mkdir(parents=True, exist_ok=True)
return modelsPath
if not importlib.util.find_spec('fasttext'):
raise ValueError('fastText not installed')
else:
import os
import fasttext
import fasttext.util
cwd = os.getcwd()
os.chdir(get_pretrained_model_path())
fasttext.util.download_model('en', if_exists='ignore')
pretrained_model = fasttext.load_model('cc.en.300.bin')
os.chdir(cwd)
self.profiler.set_params(text_process__vectorizer__external_model = pretrained_model)
self.profiler.set_params(text_process__vectorizer__external_model_type = 'binary')
"""
return code.replace('\\n', '\\n'+(indent * TAB_CHAR))
def __profiler_main_code(params, indent=2):
code = f"""
df = self.profiler.transform(df)
columns = {params['output_features']}
if isinstance(df, scipy.sparse.spmatrix):
df = pd.DataFrame(df.toarray(), columns=columns)
else:
df = pd.DataFrame(df, columns=columns)
return df
"""
return code.replace('\\n', '\\n'+(indent * TAB_CHAR))
def feature_selector_code( params, indent=0):
modules = [
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}
]
code = """
class selector():
# pass-through selector: keeps only the configured output features
def __init__(self):
pass
def run(self, df):"""
code +=f"""
return df[{params['output_features']}]
"""
return code, modules
def feature_reducer_code( params, indent=0):
modules = [
{'module': 'joblib', 'mod_from': None, 'mod_as': None},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}
]
code = f"""
class selector():
def __init__(self):
reducer_file = (Path(__file__).parent/"model")/"{params['reducer_file']}"
if not reducer_file.exists():
raise ValueError(f'Failed to load Feature Engineering model file: {{reducer_file}}')
self.model = joblib.load(reducer_file)
def run(self, df):
reducer_input = {params['input_features']}
reducer_output = {params['output_features']}
df = self.model.transform(df[reducer_input])
return pd.DataFrame(df,columns=reducer_output)
"""
if indent:
code = code.replace('\\n', '\\n'+(indent * TAB_CHAR))
return code, modules
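# Both selector generators return a (code, modules) pair: 'code' is the source text of
# a selector class and 'modules' is fed to importModule via utility.import_modules.
# Hypothetical example for the reducer variant:
#
#   params = {'reducer_file': 'featureReducer.pkl',      # hypothetical model file
#             'input_features': ['f1', 'f2', 'f3'],
#             'output_features': ['pc1', 'pc2']}
#   code, modules = feature_reducer_code(params, indent=1)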
def create_feature_list(config=None, target_feature=None, deploy_path=None):
featurelist = []
if 'profiler' in config:
if 'input_features_type' in config['profiler']:
input_features = config['profiler']['input_features_type']
for x in input_features:
featurelt={}
featurelt['feature'] = x
if x == target_feature:
featurelt['Type'] = 'Target'
else:
if input_features[x] in ['int','int64','float','float64']:
featurelt['Type'] = 'Numeric'
elif input_features[x] == 'object':
featurelt['Type'] = 'Text'
elif input_features[x] == 'category':
featurelt['Type'] = 'Category'
else:
featurelt['Type'] = 'Unknown'
featurelist.append(featurelt)
featurefile = f"""
import json
def getfeatures():
try:
features = {featurelist}
outputjson = {{"status":"SUCCESS","features":features}}
output = json.dumps(outputjson)
print("Features:",output)
return(output)
except Exception as e:
output = {{"status":"FAIL","message":str(e).strip(\\'"\\')}}
print("Features:",json.dumps(output))
return (json.dumps(output))
if __name__ == "__main__":
output = getfeatures()
"""
with open( deploy_path/'featureslist.py', 'wb') as f:
f.write( str(featurefile).encode('utf8'))
def requirement_file(deploy_path,model,textFeatures,learner_type='ML'):
modules = ['pandas','numpy','alibi','matplotlib','joblib','shap','ipython','category_encoders','scikit-learn','word2number','flask_restful','evidently','Flask-Cors']
requires = ''
for mod in modules:
requires += f"{mod}=={version(mod)}\\n"
if len(textFeatures) > 0:
tmodules = ['spacy','nltk','textblob','demoji','beautifulsoup4','text-unidecode','pyspellchecker','contractions','protobuf']
for mod in tmodules:
requires += f"{mod}=={version(mod)}\\n"
if model == 'Extreme Gradient Boosting (XGBoost)':
mmodules = ['xgboost']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model == 'Light Gradient Boosting (LightGBM)':
mmodules = ['lightgbm']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model == 'Categorical Boosting (CatBoost)':
mmodules = ['catboost']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model.lower() == 'arima':
mmodules = ['pmdarima']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model.lower() == 'fbprophet':
mmodules = ['prophet']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model.lower() == 'lstm' or model.lower() == 'mlp' or learner_type =='DL':
mmodules = ['tensorflow']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model.lower() in ['cox', 'kaplanmeierfitter']: #bug 12833
mmodules = ['lifelines']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model.lower() == 'sentencetransformer': #bug 12833
mmodules = ['sentence_transformers']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
with open( deploy_path/'requirements.txt', 'wb') as f:
f.write(str(requires).encode('utf8'))
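# requirement_file pins every dependency to the version installed in the training
# environment via importlib.metadata.version (e.g. a line such as "pandas==1.5.3";
# version shown is illustrative). Model- and text-specific packages are appended
# only when the selected algorithm or text features require them.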
def create_readme_file(deploy_path,modelfile,features):
data = json.dumps([{x:x+'_value'} for x in features])
backslash_data = data.replace('"', '\\\\"')
content = f"""
========== Files Structures ==========
{modelfile} ------ Trained Model
aion_prediction.py --> Python package entry point
script/inputprofiler.py --> Profiling like FillNA and Category to Numeric
========== How to call the model ==========
============== From Windows Terminal ==========
python aion_prediction.py "{backslash_data}"
============== From Linux Terminal ==========
python aion_prediction.py "{data}"
============== Output ==========
{{"status":"SUCCESS","data":[{{"Data1":"Value","prediction":"Value"}}]}} ## for single Row/Record
{{"status":"SUCCESS","data":[{{"Data1":"Value","prediction":"Value"}},{{"Data1":"Value","prediction":"Value"}}]}} ## For Multiple Row/Record
{{"status":"ERROR","message":"description"}} ## In Case Exception or Error
"""
filename = deploy_path/'readme.txt'
with open(filename, 'w') as f:
f.write(content)
def create_util_folder(deploy_path):
import tarfile
ext_path = Path(__file__).parent.parent/'utilities'
for x in ext_path.iterdir():
if x.suffix == '.tar':
if x.name not in ['scikit_surprise-1.1.1.dist-info.tar','surprise.tar']:
my_tar = tarfile.open(x)
my_tar.extractall(deploy_path)
my_tar.close()
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import platform
import json
import shutil
import logging
class aionPrediction:
def __init__(self):
self.log = logging.getLogger('eion')
def create_optimus_prediction_file (self,classname,deploy_path,learner_type):
self.predictionFile = 'import warnings'
self.predictionFile += '\\n'
self.predictionFile += 'warnings.filterwarnings("ignore")'
self.predictionFile += '\\n'
self.predictionFile += 'import json'
self.predictionFile += '\\n'
self.predictionFile += 'import os'
self.predictionFile += '\\n'
self.predictionFile += 'import sys'
self.predictionFile += '\\n'
self.predictionFile += 'import pandas as pd'
self.predictionFile += '\\n'
self.predictionFile += 'from pandas import json_normalize'
self.predictionFile += '\\n'
self.predictionFile += 'from importlib import import_module'
self.predictionFile += '\\n'
self.predictionFile += 'import importlib.util'
self.predictionFile += '\\n'
self.predictionFile += 'class prediction:'
self.predictionFile += '\\n'
self.predictionFile += ' def predict_from_json(self,json_data):'
self.predictionFile += '\\n'
self.predictionFile += ' data = json.loads(json_data)'
self.predictionFile += '\\n'
self.predictionFile += ' output=self.predict(data)'
self.predictionFile += '\\n'
self.predictionFile += ' print("predictions:",output)'
self.predictionFile += '\\n'
self.predictionFile += '\\n'
self.predictionFile += ' def predict_from_file(self,filename):'
self.predictionFile += '\\n'
self.predictionFile += ' with open(filename,\\'r\\',encoding=\\'utf-8\\') as f:'
self.predictionFile += '\\n'
self.predictionFile += ' data = json.load(f)'
self.predictionFile += '\\n'
self.predictionFile += ' output=self.predict(data)'
self.predictionFile += '\\n'
self.predictionFile += ' print("predictions:",output)'
self.predictionFile += '\\n'
self.predictionFile += '\\n'
self.predictionFile += ' def predict(self,json_data):'
self.predictionFile += '\\n'
self.predictionFile += ' try:'
self.predictionFile += '\\n'
#self.predictionFile += ' jsonData = json.loads(json_data)'
self.predictionFile += ' jsonData=json_data'
self.predictionFile += '\\n'
self.predictionFile += ' model_obj = importlib.util.spec_from_file_location("module.name", os.path.dirname(os.path.abspath(__file__))+"/trained_model.py")'
self.predictionFile += '\\n'
self.predictionFile += ' model = importlib.util.module_from_spec(model_obj)'
self.predictionFile += '\\n'
self.predictionFile += ' model_obj.loader.exec_module(model)'
self.predictionFile += '\\n'
#if(learner_type != 'TextML'):
self.predictionFile += ' profiler_obj = importlib.util.spec_from_file_location("module.name", os.path.dirname(os.path.abspath(__file__))+"/inputprofiler.py")'
self.predictionFile += '\\n'
self.predictionFile += ' inputprofiler = importlib.util.module_from_spec(profiler_obj)'
self.predictionFile += '\\n'
self.predictionFile += ' profiler_obj.loader.exec_module(inputprofiler)'
self.predictionFile += '\\n'
self.predictionFile += ' selector_obj = importlib.util.spec_from_file_location("module.name", os.path.dirname(os.path.abspath(__file__))+"/selector.py")'
self.predictionFile += '\\n'
self.predictionFile += ' selector = importlib.util.module_from_spec(selector_obj)'
self.predictionFile += '\\n'
self.predictionFile += ' selector_obj.loader.exec_module(selector)'
self.predictionFile += '\\n'
self.predictionFile += ' output_format_obj = importlib.util.spec_from_file_location("module.name", os.path.dirname(os.path.abspath(__file__))+"/output_format.py")'
self.predictionFile += '\\n'
self.predictionFile += ' output_format = importlib.util.module_from_spec(output_format_obj)'
self.predictionFile += '\\n'
self.predictionFile += ' output_format_obj.loader.exec_module(output_format)'
self.predictionFile += '\\n'
self.predictionFile += ' df = json_normalize(jsonData)'
self.predictionFile += '\\n'
self.predictionFile += ' df0 = df.copy()'
self.predictionFile += '\\n'
#if(learner_type != 'TextML'):
self.predictionFile += ' profilerobj = inputprofiler.inputprofiler()'
self.predictionFile += '\\n'
self.predictionFile += ' df = profilerobj.apply_profiler(df)'
self.predictionFile += '\\n'
self.predictionFile += ' selectobj = selector.selector()'
self.predictionFile += '\\n'
self.predictionFile += ' df = selectobj.apply_selector(df)'
self.predictionFile += '\\n'
self.predictionFile += ' output = model.trained_model().predict(df,"")'
self.predictionFile += '\\n'
self.predictionFile += ' outputobj = output_format.output_format()'
self.predictionFile += '\\n'
self.predictionFile += ' output = outputobj.apply_output_format(df0,output)'
#self.predictionFile += '\\n'
#self.predictionFile += ' print(output)'
self.predictionFile += '\\n'
self.predictionFile += ' return output'
self.predictionFile += '\\n'
self.predictionFile += ' except KeyError as e:'
self.predictionFile += '\\n'
self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\\'"\\')}'
self.predictionFile += '\\n'
self.predictionFile += ' return json.dumps(output)'
self.predictionFile += '\\n'
self.predictionFile += ' except Exception as e:'
self.predictionFile += '\\n'
self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\\'"\\')}'
self.predictionFile += '\\n'
self.predictionFile += ' return json.dumps(output)'
self.predictionFile += '\\n'
self.predictionFile += '\\n'
self.predictionFile += 'if __name__ == "__main__":'
self.predictionFile += '\\n'
self.predictionFile += ' predictobj = prediction()'
self.predictionFile += '\\n'
self.predictionFile += ' predictobj.predict_from_file(sys.argv[1])'
self.predictionFile += '\\n'
filename = os.path.join(deploy_path,'prediction.py')
f = open(filename, "wb")
f.write(str(self.predictionFile).encode('utf8'))
f.close()
def create_text_drift_file(self,deploy_path,features,target,model_type): #task-14549
self.predictionFile = 'import warnings'
self.predictionFile += '\\n'
self.predictionFile += 'warnings.filterwarnings("ignore")'
self.predictionFile += '\\n'
self.predictionFile += 'import json'
self.predictionFile += '\\n'
self.predictionFile += 'import os'
self.predictionFile += '\\n'
self.predictionFile += 'import sys'
self.predictionFile += '\\n'
self.predictionFile += 'import pandas as pd'
self.predictionFile += '\\n'
self.predictionFile += 'from monitoring import check_drift'
self.predictionFile += '\\n'
self.predictionFile += 'def drift(data):'
self.predictionFile += '\\n'
self.predictionFile += ' try:'
self.predictionFile += '\\n'
self.predictionFile += ' if os.path.splitext(data)[1] == ".json":'
self.predictionFile += '\\n'
self.predictionFile += ' with open(data,\\'r\\',encoding=\\'utf-8\\') as f:'
self.predictionFile += '\\n'
self.predictionFile += ' jsonData = json.load(f)'
self.predictionFile += '\\n'
self.predictionFile += ' else:'
self.predictionFile += '\\n'
self.predictionFile += ' jsonData = json.loads(data)'
self.predictionFile += '\\n'
self.predictionFile += ' jsonData[\\'features\\'] = \\''+",".join([feature for feature in features])+'\\''
self.predictionFile += '\\n'
self.predictionFile += ' jsonData[\\'target\\'] = \\''+target+'\\''
self.predictionFile += '\\n'
if model_type.lower() != 'timeseriesforecasting': #task 11997
self.predictionFile += ' htmlfilepath=evidently_details(jsonData)'
self.predictionFile += '\\n'
else:
self.predictionFile += ' htmlfilepath=\\'\\''
self.predictionFile += '\\n'
self.predictionFile += ' jsonData = json.dumps(jsonData)'
self.predictionFile += '\\n'
self.predictionFile += ' output = check_drift(jsonData)'
self.predictionFile += '\\n'
self.predictionFile += ' output = json.loads(output)'
self.predictionFile += '\\n'
self.predictionFile += ' output[\\'htmlPath\\'] = str(htmlfilepath)'
self.predictionFile += '\\n'
self.predictionFile += ' print("drift:", json.dumps(output))'
self.predictionFile += '\\n'
self.predictionFile += ' return(output)'
self.predictionFile += '\\n'
self.predictionFile += ' except KeyError as e:'
self.predictionFile += '\\n'
self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\\'"\\')}'
self.predictionFile += '\\n'
self.predictionFile += ' print("drift:",json.dumps(output))'
self.predictionFile += '\\n'
self.predictionFile += ' return (json.dumps(output))'
self.predictionFile += '\\n'
self.predictionFile += ' except Exception as e:'
self.predictionFile += '\\n'
self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\\'"\\')}'
self.predictionFile += '\\n'
self.predictionFile += ' print("drift:",json.dumps(output))'
self.predictionFile += '\\n'
self.predictionFile += ' return (json.dumps(output))'
self.predictionFile += '\\n'
if model_type.lower() != 'timeseriesforecasting': #task 11997
self.predictionFile += 'def evidently_details(deployJson):'
self.predictionFile += '\\n'
self.predictionFile += ' features = deployJson[\\'features\\'].split(\\',\\')'
self.predictionFile += '\\n'
self.predictionFile += ' target = deployJson[\\'target\\']'
self.predictionFile += '\\n'
self.predictionFile += """\\
try:
from evidently.report import Report
from evidently.metrics import TextDescriptorsDriftMetric, ColumnDriftMetric
from evidently.pipeline.column_mapping import ColumnMapping
from sklearn.preprocessing import LabelEncoder
historicaldataFrame=pd.read_csv(deployJson['trainingDataLocation'],skipinitialspace = True,na_values=['-','?'])
currentdataFrame=pd.read_csv(deployJson['currentDataLocation'],skipinitialspace = True,na_values=['-','?'])
historicaldataFrame.columns = historicaldataFrame.columns.str.strip()
currentdataFrame.columns = currentdataFrame.columns.str.strip()
hdf = historicaldataFrame.dropna(subset=features)
cdf = currentdataFrame.dropna(subset=features)
hdf['Text_Features'] = hdf[features].apply("-".join, axis=1)
cdf['Text_Features'] = cdf[features].apply("-".join, axis=1)
hdf['target'] = historicaldataFrame[target]
cdf['target'] = currentdataFrame[target]
le = LabelEncoder()
le.fit(hdf['target'])
hdf['target'] = le.transform(hdf['target'])
le.fit(cdf['target'])
cdf['target'] = le.transform(cdf['target'])
hd = hdf[['Text_Features', 'target']]
cd = cdf[['Text_Features', 'target']]
column_mapping = ColumnMapping()
column_mapping.target = 'target'
column_mapping.prediction = 'target'
column_mapping.text_features = ['Text_Features']
column_mapping.numerical_features = []
column_mapping.categorical_features = []
performance_report = Report(metrics=[ColumnDriftMetric('target'),TextDescriptorsDriftMetric(column_name='Text_Features')])
performance_report.run(reference_data=hd, current_data=cd,column_mapping=column_mapping)
report = os.path.join(os.path.dirname(os.path.abspath(__file__)),"log","My_report.html")
performance_report.save_html(report)
return(report)
except Exception as e:
print('Error: ', e)
return('NA')"""
self.predictionFile += '\\n'
self.predictionFile += 'if __name__ == "__main__":'
self.predictionFile += '\\n'
self.predictionFile += ' output = drift(sys.argv[1])'
filename = os.path.join(deploy_path,'aion_ipdrift.py')
f = open(filename, "wb")
f.write(str(self.predictionFile).encode('utf8'))
f.close()
def create_drift_file(self,deploy_path,features,target,model_type):
self.predictionFile = 'import warnings'
self.predictionFile += '\\n'
self.predictionFile += 'warnings.filterwarnings("ignore")'
self.predictionFile += '\\n'
self.predictionFile += 'import json'
self.predictionFile += '\\n'
self.predictionFile += 'import os'
self.predictionFile += '\\n'
self.predictionFile += 'import sys'
self.predictionFile += '\\n'
self.predictionFile += 'import pandas as pd'
self.predictionFile += '\\n'
self.predictionFile += 'from monitoring import check_drift'
self.predictionFile += '\\n'
self.predictionFile += 'from pandas import json_normalize'
self.predictionFile += '\\n'
self.predictionFile += 'from script.inputprofiler import inputprofiler'
self.predictionFile += '\\n'
self.predictionFile += 'def drift(data):'
self.predictionFile += '\\n'
self.predictionFile += ' try:'
self.predictionFile += '\\n'
self.predictionFile += ' if os.path.splitext(data)[1] == ".json":'
self.predictionFile += '\\n'
self.predictionFile += ' with open(data,\\'r\\',encoding=\\'utf-8\\') as f:'
self.predictionFile += '\\n'
self.predictionFile += ' jsonData = json.load(f)'
self.predictionFile += '\\n'
self.predictionFile += ' else:'
self.predictionFile += '\\n'
self.predictionFile += ' jsonData = json.loads(data)'
self.predictionFile += '\\n'
self.predictionFile += ' jsonData[\\'features\\'] = \\''+",".join([feature for feature in features])+'\\''
self.predictionFile += '\\n'
self.predictionFile += ' jsonData[\\'target\\'] = \\''+target+'\\''
self.predictionFile += '\\n'
if model_type.lower() != 'timeseriesforecasting': #task 11997
self.predictionFile += ' htmlfilepath=evidently_details(jsonData)'
self.predictionFile += '\\n'
else:
self.predictionFile += ' htmlfilepath=\\'\\''
self.predictionFile += '\\n'
self.predictionFile += ' jsonData = json.dumps(jsonData)'
self.predictionFile += '\\n'
self.predictionFile += ' output = check_drift(jsonData)'
self.predictionFile += '\\n'
self.predictionFile += ' output = json.loads(output)'
self.predictionFile += '\\n'
self.predictionFile += ' output[\\'htmlPath\\'] = str(htmlfilepath)'
self.predictionFile += '\\n'
self.predictionFile += ' output = json.dumps(output)'
self.predictionFile += '\\n'
self.predictionFile += ' print("drift:",output)'
self.predictionFile += '\\n'
self.predictionFile += ' return(output)'
self.predictionFile += '\\n'
self.predictionFile += ' except KeyError as e:'
self.predictionFile += '\\n'
self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\\'"\\')}'
self.predictionFile += '\\n'
self.predictionFile += ' print("drift:",json.dumps(output))'
self.predictionFile += '\\n'
self.predictionFile += ' return (json.dumps(output))'
self.predictionFile += '\\n'
self.predictionFile += ' except Exception as e:'
self.predictionFile += '\\n'
self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\\'"\\')}'
self.predictionFile += '\\n'
self.predictionFile += ' print("drift:",json.dumps(output))'
self.predictionFile += '\\n'
self.predictionFile += ' return (json.dumps(output))'
self.predictionFile += '\\n'
if model_type.lower() != 'timeseriesforecasting': #task 11997
self.predictionFile += 'def evidently_details(deployJson):'
self.predictionFile += '\\n'
self.predictionFile += ' features = deployJson[\\'features\\'].split(\\',\\')'
self.predictionFile += '\\n'
self.predictionFile += ' target = deployJson[\\'target\\']'
self.predictionFile += '\\n'
self.predictionFile += """\\
try:
from evidently.report import Report
from evidently.metric_preset import DataDriftPreset
historicaldataFrame=pd.read_csv(deployJson['trainingDataLocation'],skipinitialspace = True,na_values=['-','?'])
currentdataFrame=pd.read_csv(deployJson['currentDataLocation'],skipinitialspace = True,na_values=['-','?'])
historicaldataFrame.columns = historicaldataFrame.columns.str.strip()
currentdataFrame.columns = currentdataFrame.columns.str.strip()
profilerobj = inputprofiler()
historicaldataFramep = profilerobj.run(historicaldataFrame)
currentdataFramep = profilerobj.run(currentdataFrame)
hdf = historicaldataFramep[features]
cdf = currentdataFramep[features]
hdf['target'] = historicaldataFrame[target]
cdf['target'] = currentdataFrame[target]
data_drift_report = Report(metrics = [DataDriftPreset()])
data_drift_report.run(reference_data=hdf,current_data=cdf,column_mapping = None)
report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','my_report.html')
data_drift_report.save_html(report)
return(report)
except Exception as e:
print('Error: ', e)
return('NA')"""
self.predictionFile += '\\n'
self.predictionFile += 'if __name__ == "__main__":'
self.predictionFile += '\\n'
self.predictionFile += ' output = drift(sys.argv[1])'
filename = os.path.join(deploy_path,'aion_ipdrift.py')
f = open(filename, "wb")
f.write(str(self.predictionFile).encode('utf8'))
f.close()
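# The generated aion_ipdrift.py takes a JSON payload (either a .json file path or a
# raw JSON string) containing 'trainingDataLocation' and 'currentDataLocation'.
# Invocation sketch with hypothetical paths:
#
#   python aion_ipdrift.py '{"trainingDataLocation": "train.csv", "currentDataLocation": "prod.csv"}'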
def create_prediction_file(self,classname,deploy_path,learner_type,grouperbyjson,rowfilterexpression,model_type,datetimeFeature):
self.predictionFile = 'import warnings'
self.predictionFile += '\\n'
self.predictionFile += 'warnings.filterwarnings("ignore")'
self.predictionFile += '\\n'
self.predictionFile += 'import json'
self.predictionFile += '\\n'
self.predictionFile += 'import os'
self.predictionFile += '\\n'
self.predictionFile += 'import sys'
self.predictionFile += '\\n'
self.predictionFile += 'import pandas as pd'
self.predictionFile += '\\n'
self.predictionFile += 'from pandas import json_normalize'
self.predictionFile += '\\n'
if(learner_type.lower() != 'recommendersystem'): #task 11190
self.predictionFile += 'from script.selector import selector'
self.predictionFile += '\\n'
self.predictionFile += 'from script.inputprofiler import inputprofiler'
self.predictionFile += '\\n'
#self.predictionFile += 'from '+classname+' import '+classname
self.predictionFile += 'from script.trained_model import trained_model'
self.predictionFile += '\\n'
else:
self.predictionFile += 'from script.item_recommendation import collaborative_filter'
self.predictionFile += '\\n'
self.predictionFile += 'from script.output_format import output_format'
self.predictionFile += '\\n'
if (learner_type.lower() != 'recommendersystem'): #task 11190
self.predictionFile += 'profilerobj = inputprofiler()'
self.predictionFile += '\\n'
self.predictionFile += 'selectobj = selector()'
self.predictionFile += '\\n'
self.predictionFile += 'modelobj = trained_model()'
self.predictionFile += '\\n'
else:
self.predictionFile += 'colabobj = collaborative_filter()'
self.predictionFile += '\\n'
self.predictionFile += 'outputobj = output_format()'
self.predictionFile += '\\n'
self.predictionFile += 'def predict(data):'
self.predictionFile += '\\n'
self.predictionFile += ' try:'
self.predictionFile += '\\n'
self.predictionFile += ' if os.path.splitext(data)[1] == ".tsv":'
self.predictionFile += '\\n'
self.predictionFile += ' df=pd.read_csv(data,encoding=\\'utf-8\\',sep=\\'\\\\t\\',skipinitialspace = True,na_values=[\\'-\\',\\'?\\'])'
self.predictionFile += '\\n'
self.predictionFile += ' elif os.path.splitext(data)[1] == ".csv":'
self.predictionFile += '\\n'
self.predictionFile += ' df=pd.read_csv(data,encoding=\\'utf-8\\',skipinitialspace = True,na_values=[\\'-\\',\\'?\\'])'
self.predictionFile += '\\n'
self.predictionFile += ' elif os.path.splitext(data)[1] == ".dat":'
self.predictionFile += '\\n'
self.predictionFile += ' df=pd.read_csv(data,encoding=\\'utf-8\\',skipinitialspace = True,na_values=[\\'-\\',\\'?\\'])'
self.predictionFile += '\\n'
self.predictionFile += ' else:'
self.predictionFile += '\\n'
self.predictionFile += ' if os.path.splitext(data)[1] == ".json":'
self.predictionFile += '\\n'
self.predictionFile += ' with open(data,\\'r\\',encoding=\\'utf-8\\') as f:'
self.predictionFile += '\\n'
self.predictionFile += ' jsonData = json.load(f)'
self.predictionFile += '\\n'
self.predictionFile += ' else:'
self.predictionFile += '\\n'
self.predictionFile += ' jsonData = json.loads(data)'
self.predictionFile += '\\n'
self.predictionFile += ' df = json_normalize(jsonData)'
self.predictionFile += '\\n'
self.predictionFile += ' df.rename(columns=lambda x: x.strip(), inplace=True)'
self.predictionFile += '\\n'
if str(rowfilterexpression) != '':
self.predictionFile += ' filterexpression = "'+rowfilterexpression+'"'
self.predictionFile += '\\n'
self.predictionFile += ' df = df.query(filterexpression)'
self.predictionFile += '\\n'
#print(grouperbyjson)
if str(grouperbyjson) != '':
datetime = grouperbyjson['datetime']
unit = grouperbyjson['unit']
if unit == '':
self.predictionFile += ' df[\\'date\\'] = pd.to_datetime(df[\\''+datetime+'\\'])'
self.predictionFile += '\\n'
else:
self.predictionFile += ' df[\\'date\\'] = pd.to_datetime(df[\\''+datetime+'\\'],unit=\\''+unit+'\\')'
self.predictionFile += '\\n'
self.predictionFile += ' df = df.reset_index()'
self.predictionFile += '\\n'
self.predictionFile += ' df.set_index(\\'date\\',inplace=True)'
self.predictionFile += '\\n'
self.predictionFile += ' df = df.'+grouperbyjson['groupbystring']
self.predictionFile += '\\n'
self.predictionFile += ' df.columns = df.columns.droplevel(0)'
self.predictionFile += '\\n'
self.predictionFile += ' df = df.reset_index()'
self.predictionFile += '\\n'
self.predictionFile += ' df0 = df.copy()'
self.predictionFile += '\\n'
if(learner_type.lower() != 'recommendersystem'): #task 11190
if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na':
self.predictionFile += ' df,datetimeFeature = profilerobj.apply_profiler(df)'
self.predictionFile += '\\n'
else:
self.predictionFile += ' df = profilerobj.apply_profiler(df)'
self.predictionFile += '\\n'
self.predictionFile += ' df = selectobj.apply_selector(df)'
self.predictionFile += '\\n'
#self.predictionFile += ' modelobj = '+classname+'()'
self.predictionFile += ' output = modelobj.predict(df,"")'
self.predictionFile += '\\n'
else:
self.predictionFile += ' output = colabobj.predict(df)'
self.predictionFile += '\\n'
if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na':
self.predictionFile += ' output = outputobj.apply_output_format(df0,output,datetimeFeature)'
self.predictionFile += '\\n'
else:
self.predictionFile += ' output = outputobj.apply_output_format(df0,output)'
self.predictionFile += '\\n'
self.predictionFile += ' print("predictions:",output)'
self.predictionFile += '\\n'
self.predictionFile += ' return(output)'
self.predictionFile += '\\n'
self.predictionFile += ' except KeyError as e:'
self.predictionFile += '\\n'
self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\\'"\\')}'
self.predictionFile += '\\n'
self.predictionFile += ' print("predictions:",json.dumps(output))'
self.predictionFile += '\\n'
self.predictionFile += ' return (json.dumps(output))'
self.predictionFile += '\\n'
self.predictionFile += ' except Exception as e:'
self.predictionFile += '\\n'
self.predictionFile += ' output = {"status":"FAIL","message":str(e).strip(\\'"\\')}'
self.predictionFile += '\\n'
self.predictionFile += ' print("predictions:",json.dumps(output))'
self.predictionFile += '\\n'
self.predictionFile += ' return (json.dumps(output))'
self.predictionFile += '\\n'
self.predictionFile += 'if __name__ == "__main__":'
self.predictionFile += '\\n'
self.predictionFile += ' output = predict(sys.argv[1])'
filename = os.path.join(deploy_path,'aion_predict.py')
f = open(filename, "w")
f.write(str(self.predictionFile))
f.close()
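# The generated aion_predict.py accepts a .tsv/.csv/.dat/.json file path or a raw
# JSON string and prints the result as JSON prefixed with "predictions:".
# Invocation sketch with a hypothetical input file:
#
#   python aion_predict.py input.csv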
def create_classification_text_performance_file(self,deploy_path,features,target):
features = ",".join([feature for feature in features])
self.predictionFile = """\\
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import json
import os
import sys
from pandas import json_normalize
# from evidently.dashboard import Dashboard
# from evidently.tabs import ClassificationPerformanceTab
from evidently.pipeline.column_mapping import ColumnMapping
from aion_predict import predict
from evidently.report import Report
from evidently.pipeline.column_mapping import ColumnMapping
from evidently.metric_preset import ClassificationPreset
def odrift(data):
try:
"""
self.predictionFile += ' features = \\''+features+'\\''
self.predictionFile += '\\n'
self.predictionFile += ' target = \\''+target+'\\''
self.predictionFile += '\\n'
self.predictionFile +="""\\
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
production = predict().run(jsonData['currentDataLocation'])
reference = predict().run(jsonData['trainingDataLocation'])
production = json.loads(production)
reference = json.loads(reference)
if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'):
production = production['data']
production = json_normalize(production)
reference = reference['data']
reference = json_normalize(reference)
production['target'] = production[target]
reference['target'] = reference[target]
column_mapping = ColumnMapping()
column_mapping.target = target
column_mapping.prediction = 'prediction'
column_mapping.datetime = None
column_mapping.text_features = features.split(',')
iris_model_performance_dashboard = Report(metrics=[ClassificationPreset()])
iris_model_performance_dashboard.run(reference_data=reference, current_data=production,column_mapping=column_mapping)
report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html')
iris_model_performance_dashboard.save_html(report)
metrics_output = iris_model_performance_dashboard.as_dict()
output = {"status":"SUCCESS","htmlPath":report, 'drift_details':metrics_output['metrics']}
print("drift:",json.dumps(output))
return (json.dumps(output))
except KeyError as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
except Exception as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
if __name__ == "__main__":
output = odrift(sys.argv[1])"""
filename = os.path.join(deploy_path,'aion_opdrift.py')
f = open(filename, "wb")
f.write(str(self.predictionFile).encode('utf8'))
f.close()
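# The aion_opdrift.py scripts generated here and below share the aion_ipdrift.py
# payload ('trainingDataLocation' and 'currentDataLocation'); the Evidently
# performance report is saved to log/performance.html next to the script.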
def create_classification_performance_file(self,deploy_path,features,target):
features = ",".join([feature for feature in features])
self.predictionFile = """\\
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import json
import os
import sys
from pandas import json_normalize
from evidently.report import Report
from evidently.metric_preset import ClassificationPreset
from evidently.pipeline.column_mapping import ColumnMapping
from aion_predict import predict
def odrift(data):
try:
"""
self.predictionFile += ' features = \\''+features+'\\''
self.predictionFile += '\\n'
self.predictionFile += ' target = \\''+target+'\\''
self.predictionFile += '\\n'
self.predictionFile +="""\\
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
production = predict().run(jsonData['currentDataLocation'])
reference = predict().run(jsonData['trainingDataLocation'])
production = json.loads(production)
reference = json.loads(reference)
if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'):
production = production['data']
production = json_normalize(production)
reference = reference['data']
reference = json_normalize(reference)
production['target'] = production[target]
reference['target'] = reference[target]
column_mapping = ColumnMapping()
column_mapping.target = target
column_mapping.prediction = 'prediction'
column_mapping.datetime = None
column_mapping.numerical_features = features.split(',')
model_performance_dashboard = Report(metrics = [ClassificationPreset()])
model_performance_dashboard.run(reference_data =reference, current_data =production, column_mapping = column_mapping)
report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html')
model_performance_dashboard.save_html(report)
metrics_output = model_performance_dashboard.as_dict()
output = {"status":"SUCCESS","htmlPath":report, 'drift_details':metrics_output['metrics']}
print("drift:",json.dumps(output))
return (json.dumps(output))
else:
output = {"status":"SUCCESS","htmlPath":'NA'}
print("drift:",json.dumps(output))
return (json.dumps(output))
except KeyError as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
except Exception as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
if __name__ == "__main__":
output = odrift(sys.argv[1])"""
filename = os.path.join(deploy_path,'aion_opdrift.py')
f = open(filename, "wb")
f.write(str(self.predictionFile).encode('utf8'))
f.close()
def create_model_service(self,deploy_path,serviceName,problemType):
filedata = """
from flask import Flask, jsonify, request
from flask_restful import Resource, Api
from aion_predict import predict"""
if problemType.lower() == 'classification' or problemType.lower() == 'regression':
filedata += """
from aion_xai import local_analysis
from aion_ipdrift import drift
from aion_opdrift import odrift"""
filedata += """
import json
import os
import pandas as pd
import io
import argparse
from pathlib import Path
from flask_cors import CORS, cross_origin
app = Flask(__name__)
#cross origin resource from system arguments
parser = argparse.ArgumentParser()
parser.add_argument('-ip', '--ipaddress', help='IP Address')
parser.add_argument('-p', '--port', help='Port Number')
parser.add_argument("-cors", type=str, required=False)
d = vars(parser.parse_args())
modelPath = Path(__file__).parent
try:
with open( (modelPath/'etc')/'display.json', 'r') as f:
disp_data = json.load(f)
is_explainable = not disp_data.get('textFeatures')
except:
disp_data = {}
is_explainable = True
if "cors" in d.keys():
if d["cors"] != '' and d["cors"] != None:
d["cors"] = [s.strip() for s in d["cors"].split(",")]
#cors = CORS(app, resources={r"/AION/*": {"origins": ["http://localhost", "http://localhost:5000"]}})
cors = CORS(app, resources={r"/AION/*": {"origins": d["cors"]}})
api = Api(app)
class predictapi(Resource):
def get(self):
features = disp_data.get('modelFeatures')
if features:
displaymsg=\\"""
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
\\""".format(displaymsg={ x:'Value' for x in features})
else:
displaymsg='Data in JSON Format'
return jsonify(displaymsg)
def post(self):
data = request.get_json()
output = predict().run(json.dumps(data))
return jsonify(json.loads(output))
class predictfileapi(Resource):
def post(self):
if 'file' in request.files:
file = request.files['file']
urlData = file.read()
rawData = pd.read_csv(io.StringIO(urlData.decode('utf-8')))
data = rawData.to_json(orient='records')
output = predict().run(data)
return jsonify(json.loads(output))
else:
displaymsg='File is missing'
return jsonify(displaymsg)
def get(self):
msg=\\"""
RequestType: POST
Body:send file content in body\\"""
return jsonify(msg)
"""
if problemType.lower() == 'classification' or problemType.lower() == 'regression':
filedata += """
class explainapi(Resource):
def get(self):
features = disp_data.get('modelFeatures')
if features:
displaymsg=\\"""
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
\\""".format(displaymsg={ x:'Value' for x in features})
else:
displaymsg='Data in JSON Format'
return jsonify(displaymsg)
def post(self):
data = request.get_json()
if is_explainable:
output = local_analysis(json.dumps(data))
else:
output = json.dumps({"status":"FAIL","data":"explain api is not supported when text features are used for training"})
return jsonify(json.loads(output))
class monitoringapi(Resource):
def get(self):
return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'})
def post(self):
data = request.get_json()
output = drift(json.dumps(data))
return jsonify(json.loads(output))
class performanceapi(Resource):
def get(self):
return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'})
def post(self):
data = request.get_json()
output = odrift(json.dumps(data))
return jsonify(json.loads(output))
"""
filedata += """
api.add_resource(predictapi, '/AION/{serviceName}/predict')""".format(serviceName=serviceName)
filedata += """
api.add_resource(predictfileapi, '/AION/{serviceName}/predict_file')""".format(serviceName=serviceName)
if problemType.lower() == 'classification' or problemType.lower() == 'regression':
filedata += """
api.add_resource(explainapi, '/AION/{serviceName}/explain')
api.add_resource(monitoringapi, '/AION/{serviceName}/monitoring')
api.add_resource(performanceapi, '/AION/{serviceName}/performance')""".format(serviceName=serviceName)
filedata += """
if __name__ == '__main__':
args = parser.parse_args()
app.run(args.ipaddress,port = args.port,debug = True)"""
filename = os.path.join(deploy_path,'aion_service.py')
f = open(filename, "wb")
f.write(str(filedata).encode('utf8'))
f.close()
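# The generated aion_service.py is a Flask/flask_restful app driven by the argparse
# options defined above. Launch sketch with hypothetical values:
#
#   python aion_service.py -ip 127.0.0.1 -p 8080 -cors "http://localhost:8080"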
def create_regression_performance_file(self,deploy_path,features,target):
features = ",".join([feature for feature in features])
self.predictionFile = """\\
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import json
import os
import sys
from pandas import json_normalize
from evidently.report import Report
from evidently.metric_preset import RegressionPreset
from evidently.pipeline.column_mapping import ColumnMapping
from aion_predict import predict
def odrift(data):
try:
"""
self.predictionFile += ' features = \\''+features+'\\''
self.predictionFile += '\\n'
self.predictionFile += ' target = \\''+target+'\\''
self.predictionFile += '\\n'
self.predictionFile +="""\\
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
production = predict().run(jsonData['currentDataLocation'])
reference = predict().run(jsonData['trainingDataLocation'])
production = json.loads(production)
reference = json.loads(reference)
if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'):
production = production['data']
production = json_normalize(production)
reference = reference['data']
reference = json_normalize(reference)
production['target'] = production[target]
reference['target'] = reference[target]
column_mapping = ColumnMapping()
column_mapping.target = target
column_mapping.prediction = 'prediction'
column_mapping.datetime = None
column_mapping.numerical_features = features.split(',')
iris_model_performance_dashboard = Report(metrics=[RegressionPreset()])
iris_model_performance_dashboard.run(reference_data = reference, current_data = production, column_mapping = column_mapping)
report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html')
iris_model_performance_dashboard.save_html(report)
metrics_output = iris_model_performance_dashboard.as_dict()
output = {"status":"SUCCESS","htmlPath":report, 'drift_details':metrics_output['metrics']}
print("drift:",json.dumps(output))
return (json.dumps(output))
else:
output = {"status":"SUCCESS","htmlPath":'NA'}
print("drift:",json.dumps(output))
return (json.dumps(output))
except KeyError as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
except Exception as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
if __name__ == "__main__":
output = odrift(sys.argv[1])"""
filename = os.path.join(deploy_path,'aion_opdrift.py')
f = open(filename, "wb")
f.write(str(self.predictionFile).encode('utf8'))
f.close()
def create_regression_text_performance_file(self,deploy_path,features,target):
features = ",".join([feature for feature in features])
self.predictionFile = """\\
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import json
import os
import sys
from pandas import json_normalize
from aion_predict import predict
from evidently.report import Report
from evidently.pipeline.column_mapping import ColumnMapping
from evidently.metric_preset import RegressionPreset
def odrift(data):
try:
"""
self.predictionFile += ' features = \\''+features+'\\''
self.predictionFile += '\\n'
self.predictionFile += ' target = \\''+target+'\\''
self.predictionFile += '\\n'
self.predictionFile +="""\\
if os.path.splitext(data)[1] == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
else:
jsonData = json.loads(data)
production = predict().run(jsonData['currentDataLocation'])
reference = predict().run(jsonData['trainingDataLocation'])
production = json.loads(production)
reference = json.loads(reference)
if (production['status'] == 'SUCCESS' and reference['status'] == 'SUCCESS'):
production = production['data']
production = json_normalize(production)
reference = reference['data']
reference = json_normalize(reference)
production['target'] = production[target]
reference['target'] = reference[target]
column_mapping = ColumnMapping()
column_mapping.target = target
column_mapping.prediction = 'prediction'
column_mapping.datetime = None
column_mapping.numerical_features = features.split(',')
iris_model_performance_dashboard = Report(metrics=[RegressionPreset()])
iris_model_performance_dashboard.run(reference_data=reference, current_data=production,column_mapping=column_mapping)
report = os.path.join(os.path.dirname(os.path.abspath(__file__)),'log','performance.html')
iris_model_performance_dashboard.save_html(report)
metrics_output = iris_model_performance_dashboard.as_dict()
output = {"status":"SUCCESS","htmlPath":report, 'drift_details':metrics_output['metrics']}
print("drift:",json.dumps(output))
return (json.dumps(output))
else:
output = {"status":"SUCCESS","htmlPath":'NA'}
print("drift:",json.dumps(output))
return (json.dumps(output))
except KeyError as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
except Exception as e:
print(e)
output = {"status":"FAIL","message":str(e).strip('"')}
print("drift:",json.dumps(output))
return (json.dumps(output))
if __name__ == "__main__":
output = odrift(sys.argv[1])"""
filename = os.path.join(deploy_path,'aion_opdrift.py')
f = open(filename, "wb")
f.write(str(self.predictionFile).encode('utf8'))
f.close()
def create_publish_service(self,datalocation,usecaseid,version,problemType):
filename = os.path.join(datalocation,'aion_publish_service.py')
if not os.path.exists(filename):
filedata = """
import sys
import json
import time
import sqlite3
import argparse
import pandas as pd
import io
from pathlib import Path
from datetime import datetime
filename = Path(__file__).parent/'config.json'
with open (filename, "r") as f:
data = json.loads(f.read())
modelVersion = str(data['version'])
modelPath = Path(__file__).parent/modelVersion
sys.path.append(str(modelPath))
try:
with open( (modelPath/'etc')/'display.json', 'r') as f:
disp_data = json.load(f)
is_explainable = not disp_data.get('textFeatures')
except:
disp_data = {}
is_explainable = True
from flask import Flask, jsonify, request
from flask_restful import Resource, Api
from flask_cors import CORS, cross_origin
from flask import Response
from aion_predict import predict
"""
if problemType.lower() == 'classification' or problemType.lower() == 'regression':
filedata += """
from aion_ipdrift import drift
from aion_opdrift import odrift
if is_explainable:
from aion_xai import local_analysis
"""
filedata += """
dataPath = Path(__file__).parent/'data'
dataPath.mkdir(parents=True, exist_ok=True)
app = Flask(__name__)
#cross origin resource from system arguments
parser = argparse.ArgumentParser()
parser.add_argument('-ip', '--ipaddress', help='IP Address')
parser.add_argument('-p', '--port', help='Port Number')
parser.add_argument("-cors", type=str, required=False)
d = vars(parser.parse_args())
if "cors" in d.keys():
if d["cors"] != '' and d["cors"] != None:
d["cors"] = [s.strip() for s in d["cors"].split(",")]
#cors = CORS(app, resources={r"/AION/*": {"origins": ["http://localhost", "http://localhost:5000"]}})
cors = CORS(app, resources={r"/AION/*": {"origins": d["cors"]}})
api = Api(app)
class sqlite_db():
def __init__(self, location, database_file=None):
if not isinstance(location, Path):
location = Path(location)
if database_file:
self.database_name = database_file
else:
self.database_name = location.stem + '.db'
db_file = str(location/self.database_name)
self.conn = sqlite3.connect(db_file)
self.cursor = self.conn.cursor()
self.tables = []
def table_exists(self, name):
if name in self.tables:
return True
elif name:
query = f"SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';"
listOfTables = self.cursor.execute(query).fetchall()
if len(listOfTables) > 0 :
self.tables.append(name)
return True
return False
def read(self, table_name, condition=''):
if condition == '':
return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn)
else:
return pd.read_sql_query(f"SELECT * FROM {table_name} WHERE {condition}", self.conn)
def create_table(self,name, columns, dtypes):
query = f'CREATE TABLE IF NOT EXISTS {name} ('
for column, data_type in zip(columns, dtypes):
query += f"'{column}' TEXT,"
query = query[:-1]
query += ');'
self.conn.execute(query)
return True
def update(self,table_name,updates,condition):
update_query = f'UPDATE {table_name} SET {updates} WHERE {condition}'
self.cursor.execute(update_query)
self.conn.commit()
return True
def write(self,data, table_name):
if not self.table_exists(table_name):
self.create_table(table_name, data.columns, data.dtypes)
tuple_data = list(data.itertuples(index=False, name=None))
insert_query = f'INSERT INTO {table_name} VALUES('
for i in range(len(data.columns)):
insert_query += '?,'
insert_query = insert_query[:-1] + ')'
self.cursor.executemany(insert_query, tuple_data)
self.conn.commit()
return True
def delete(self, name):
pass
def close(self):
self.conn.close()"""
filedata += """
class predictapi(Resource):
def get(self):
features = disp_data.get('modelFeatures')
if features:
displaymsg=\\"""
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
\\""".format(displaymsg={ x:'Value' for x in features})
else:
displaymsg='Data in JSON Format'
return jsonify(displaymsg)
def post(self):
sqlite_dbObj = sqlite_db(dataPath,'data.db')
if not sqlite_dbObj.table_exists('metrices'):
data = {'noOfPredictCalls':'0','noOfDriftCalls':'0',"noOfActualCalls":'0',"mid":'0'}
data = pd.DataFrame(data, index=[0])
sqlite_dbObj.create_table('metrices',data.columns, data.dtypes)
data = request.get_json()
output = predict().run(json.dumps(data))
outputobj = json.loads(output)
if outputobj['status'] == 'SUCCESS':
try:
df2 = pd.read_json(json.dumps(outputobj['data']), orient ='records')
if not sqlite_dbObj.table_exists('prodData'):
sqlite_dbObj.create_table('prodData',df2.columns, df2.dtypes)
sqlite_dbObj.write(df2,'prodData')
except:
pass
try:
data = sqlite_dbObj.read('metrices')
#print(data)
if len(data) == 0:
data = [{'mid':'0','noOfPredictCalls':'1','noOfDriftCalls':'0',"noOfActualCalls":'0'}]
data = pd.read_json(json.dumps(data), orient ='records')
sqlite_dbObj.write(data,'metrices')
else:
noofPredictCalls = int(data['noOfPredictCalls'].iloc[0])+1
sqlite_dbObj.update('metrices',"noOfPredictCalls = '"+str(noofPredictCalls)+"'","mid = 0")
except Exception as e:
print(e)
pass
return jsonify(json.loads(output))
class predictfileapi(Resource):
def post(self):
sqlite_dbObj = sqlite_db(dataPath,'data.db')
if not sqlite_dbObj.table_exists('metrices'):
data = {'noOfPredictCalls':'0','noOfDriftCalls':'0',"noOfActualCalls":'0',"mid":'0'}
data = pd.DataFrame(data, index=[0])
sqlite_dbObj.create_table('metrices',data.columns, data.dtypes)
if 'file' in request.files:
file = request.files['file']
urlData = file.read()
rawData = pd.read_csv(io.StringIO(urlData.decode('utf-8')))
data = rawData.to_json(orient='records')
output = predict().run(data)
outputobj = json.loads(output)
if outputobj['status'] == 'SUCCESS':
try:
df2 = pd.read_json(json.dumps(outputobj['data']), orient ='records')
if not sqlite_dbObj.table_exists('prodData'):
sqlite_dbObj.create_table('prodData',df2.columns, df2.dtypes)
sqlite_dbObj.write(df2,'prodData')
except:
pass
try:
data = sqlite_dbObj.read('metrices')
#print(data)
if len(data) == 0:
data = [{'mid':'0','noOfPredictCalls':'1','noOfDriftCalls':'0',"noOfActualCalls":'0'}]
data = pd.read_json(json.dumps(data), orient ='records')
sqlite_dbObj.write(data,'metrices')
else:
noofPredictCalls = int(data['noOfPredictCalls'].iloc[0])+1
sqlite_dbObj.update('metrices',"noOfPredictCalls = '"+str(noofPredictCalls)+"'","mid = 0")
except Exception as e:
print(e)
pass
return jsonify(json.loads(output))
else:
output = {'status':'error','msg':'File is missing'}
return jsonify(output)
"""
if problemType.lower() == 'classification' or problemType.lower() == 'regression':
filedata += """
class explainapi(Resource):
def get(self):
features = disp_data.get('modelFeatures')
if features:
displaymsg=\\"""
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
\\""".format(displaymsg={ x:'Value' for x in features})
else:
displaymsg='Data in JSON Format'
return jsonify(displaymsg)
def post(self):
data = request.get_json()
if is_explainable:
output = local_analysis(json.dumps(data))
else:
output = json.dumps({"status":"FAIL","data":"explain api is not supported when text features are used for training"})
return jsonify(json.loads(output))
class monitoringapi(Resource):
def get(self):
return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'})
def post(self):
sqlite_dbObj = sqlite_db(dataPath,'data.db')
if not sqlite_dbObj.table_exists('monitoring'):
data = {'status':'No Drift','Msg':'No Input Drift Found','RecordTime':'Time','version':'1'}
data = pd.DataFrame(data, index=[0])
sqlite_dbObj.create_table('monitoring',data.columns, data.dtypes)
trainingDataPath = (modelPath/'data')/'preprocesseddata.csv.gz'
if not sqlite_dbObj.table_exists('prodData'):
return jsonify({'status':'Error','msg':'Prod data not available'})
data = sqlite_dbObj.read('prodData')
filetimestamp = str(int(time.time()))
dataFile = dataPath/('AION_' + filetimestamp+'.csv')
data.to_csv(dataFile, index=False)
data = request.get_json()
data={'trainingDataLocation':trainingDataPath,'currentDataLocation':dataFile}
output = drift(json.dumps(data))
outputData = json.loads(output)
status = outputData['status']
if status == 'SUCCESS':
Msg = str(outputData['data'])
else:
Msg = 'Error during drift analysis'
now = datetime.now() # current date and time
date_time = now.strftime("%m/%d/%Y, %H:%M:%S")
data = {'status':status,'Msg':Msg,'RecordTime':date_time,'version':modelVersion}
data = pd.DataFrame(data, index=[0])
sqlite_dbObj.write(data,'monitoring')
return jsonify(json.loads(output))"""
filedata += """
class matricesapi(Resource):
def get(self):
sqlite_dbObj = sqlite_db(dataPath,'data.db')
if sqlite_dbObj.table_exists('metrices'):
df1 = sqlite_dbObj.read('metrices')
else:
df1 = pd.DataFrame()
#print(df1)
if sqlite_dbObj.table_exists('monitoring'):
df2 = sqlite_dbObj.read('monitoring')
else:
df2 = pd.DataFrame()
msg = {'Deployed Version':str(modelVersion)}
if df1.shape[0] > 0:
msg.update({'noOfPredictCalls':str(df1['noOfPredictCalls'].iloc[0])})
else:
msg.update({'noOfPredictCalls':'0'})
driftDetails = []
for idx in reversed(df2.index):
driftd = {'version':str(df2.version[idx]),'status':str(df2.status[idx]),'recordTime':str(df2.RecordTime[idx]),'msg':str(df2.Msg[idx])}
driftDetails.append(driftd)
msg.update({'driftDetails':driftDetails})
return jsonify(msg)
class performanceapi(Resource):
def get(self):
return jsonify({'trainingDataLocation':'Training File Location','currentDataLocation':'production Location'})
def post(self):
sqlite_dbObj = sqlite_db(dataPath,'data.db')
if not sqlite_dbObj.table_exists('monitoring'):
data = {'status':'No Drift','Msg':'No Input Drift Found','RecordTime':'Time','version':'1'}
data = pd.DataFrame(data, index=[0])
sqlite_dbObj.create_table('monitoring',data.columns, data.dtypes)
trainingDataPath = (modelPath/'data')/'preprocesseddata.csv.gz'
if not sqlite_dbObj.table_exists('prodData'):
return jsonify({'status':'Error','msg':'Prod data not available'})
data = sqlite_dbObj.read('prodData')
filetimestamp = str(int(time.time()))
dataFile = dataPath/('AION_' + filetimestamp+'.csv')
data.to_csv(dataFile, index=False)
data = request.get_json()
data={'trainingDataLocation':trainingDataPath,'currentDataLocation':dataFile}
output = odrift(json.dumps(data))
return jsonify(json.loads(output))
"""
filedata += """
api.add_resource(predictapi, '/AION/{serviceName}/predict')
api.add_resource(predictfileapi, '/AION/{serviceName}/predict_file')
api.add_resource(matricesapi, '/AION/{serviceName}/metrices')""".format(serviceName=usecaseid)
if problemType.lower() == 'classification' or problemType.lower() == 'regression':
filedata += """
api.add_resource(explainapi, '/AION/{serviceName}/explain')
api.add_resource(monitoringapi, '/AION/{serviceName}/monitoring')
api.add_resource(performanceapi, '/AION/{serviceName}/performance')
""".format(serviceName=usecaseid)
filedata += """
if __name__ == '__main__':
args = parser.parse_args()
app.run(args.ipaddress,port = args.port,debug = True)"""
f = open(filename, "wb")
f.write(str(filedata).encode('utf8'))
f.close()
data = {'version':version}
filename = os.path.join(datalocation,'config.json')
with open(filename, "w") as outfile:
json.dump(data, outfile)
outfile.close() <s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os,sys
import platform
import json
import shutil
import logging
from pathlib import Path
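# Note: the functions in this module are defined at module level but take an explicit
# 'self' argument; elsewhere in this codebase they are called as
# cs.create_selector_file(self, ...) with a DeploymentManager instance, so they append
# generated source to attributes such as self.selectorfile and self.modelfile and use
# self.log for logging.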
def create_selector_file(self,deploy_path,features,pcaModel_pickle_file,bpca_features,apca_features,textFeatures,nonNumericFeatures,numericalFeatures,profiler,targetFeature, model_type,model,config=None):
self.selectorfile += 'import pandas as pd'
self.selectorfile += '\\n'
self.selectorfile += 'import joblib'
self.selectorfile += '\\n'
self.selectorfile += 'import os'
self.selectorfile += '\\n'
self.selectorfile += 'import numpy as np'
self.selectorfile += '\\n'
self.selectorfile += 'class selector(object):'
self.selectorfile += '\\n'
self.selectorfile += ' def apply_selector(self,df):'
self.selectorfile += '\\n'
if pcaModel_pickle_file != '':
self.selectorfile += " pcaModel = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','"+pcaModel_pickle_file+"'))"
self.selectorfile += '\\n'
self.selectorfile += ' bpca_features = '+str(bpca_features)
self.selectorfile += '\\n'
self.selectorfile += ' apca_features = '+str(apca_features)
self.selectorfile += '\\n'
self.selectorfile += ' df = pcaModel.transform(df[bpca_features])'
self.selectorfile += '\\n'
self.selectorfile += ' df = pd.DataFrame(df,columns=apca_features)'
self.selectorfile += '\\n'
if(len(features) != 0) and model_type != 'BM25':
if model_type.lower()!='anomaly_detection' and model.lower() != 'autoencoder':
self.selectorfile += ' df = df['+str(features)+']'
self.selectorfile += '\\n'
self.selectorfile += ' return(df)'
filename = os.path.join(deploy_path,'script','selector.py')
f = open(filename, "wb")
self.log.info('-------> Feature Selector File Location :'+filename)
    f.write(str(self.selectorfile).encode('utf8'))
f.close()
featurefile = 'import json'
featurefile +='\\n'
featurefile += 'def getfeatures():'
featurefile +='\\n'
featurefile +=' try:'
featurefile +='\\n'
featurelist = []
if 'profiler' in config:
if 'input_features_type' in config['profiler']:
inputfeatures = config['profiler']['input_features_type']
for x in inputfeatures:
featurelt={}
featurelt['feature'] = x
print(x,inputfeatures[x])
if x == targetFeature:
featurelt['Type'] = 'Target'
else:
if inputfeatures[x] in ['int','int64','float','float64']:
featurelt['Type'] = 'Numeric'
elif inputfeatures[x] == 'object':
featurelt['Type'] = 'Text'
elif inputfeatures[x] == 'category':
featurelt['Type'] = 'Category'
else:
featurelt['Type'] = 'Unknown'
featurelist.append(featurelt)
featurefile +=' features = '+str(featurelist)
featurefile +='\\n'
featurefile +=' outputjson = {"status":"SUCCESS","features":features}'
featurefile +='\\n'
featurefile +=' output = json.dumps(outputjson)'
featurefile +='\\n'
featurefile +=' print("Features:",output)'
featurefile +='\\n'
featurefile +=' return(output)'
featurefile +='\\n'
featurefile +=' except Exception as e:'
featurefile +='\\n'
featurefile +=' output = {"status":"FAIL","message":str(e).strip(\\'"\\')}'
featurefile +='\\n'
featurefile +=' print("Features:",json.dumps(output))'
featurefile +='\\n'
featurefile +=' return (json.dumps(output))'
featurefile +='\\n'
featurefile +='if __name__ == "__main__":'
featurefile +='\\n'
featurefile +=' output = getfeatures()'
filename = os.path.join(deploy_path,'featureslist.py')
f = open(filename, "wb")
f.write(str(featurefile).encode('utf8'))
f.close()
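# The featureslist.py written above exposes getfeatures(), which prints and returns a JSON
# payload of the form shown below (feature names and types are illustrative examples):
#   Features: {"status": "SUCCESS", "features": [{"feature": "age", "Type": "Numeric"},
#                                                {"feature": "label", "Type": "Target"}]}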
def create_init_function_for_classification(self,modelfile,classes,learner_type,scoreParam,loss_matrix,optimizer,preprocessing_pipe,modelName,model_type,imageconfig):
self.modelfile += ' def __init__(self):'
self.modelfile += '\\n'
if (learner_type == 'ML' and model_type.lower()=='anomaly_detection' and modelName.lower()=="autoencoder"):
modelfile=modelfile.replace('.sav','')
self.modelfile+=" self.model = tf.keras.models.load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\\n'
elif(learner_type == 'TextDL' or learner_type == 'DL'):
if modelName.lower() == 'googlemodelsearch':
self.modelfile += ' import autokeras as ak'
self.modelfile += '\\n'
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','modelsearch_rootdir','saved_model_onnx.onnx'))"
self.modelfile += '\\n'
else:
if scoreParam == 'recall':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'recall': recall_m},compile=False)"
self.modelfile += '\\n'
self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[recall_m])'
self.modelfile += '\\n'
elif scoreParam == 'precision':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'precision': precision_m},compile=False)"
self.modelfile += '\\n'
self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[precision_m])'
self.modelfile += '\\n'
elif scoreParam == 'roc_auc':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),compile=False)"
self.modelfile += '\\n'
self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[tf.keras.metrics.AUC()])'
self.modelfile += '\\n'
elif scoreParam == 'f1_score':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'f1_score': f1_m},compile=False)"
self.modelfile += '\\n'
self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[f1_m])'
self.modelfile += '\\n'
elif scoreParam == 'r2':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'r2': r_square},compile=False)"
self.modelfile += '\\n'
self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[r_square])'
self.modelfile += '\\n'
elif scoreParam == 'rmse':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects={'rmse': rmse_m},compile=False)"
self.modelfile += '\\n'
self.modelfile += ' self.model.compile(loss=\\''+loss_matrix+'\\',optimizer=\\''+optimizer+'\\', metrics=[rmse_m])'
self.modelfile += '\\n'
elif scoreParam == 'mse':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\\n'
elif scoreParam == 'mae':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\\n'
elif scoreParam == 'accuracy':
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\\n'
else:
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\\n'
elif(learner_type == 'Text Similarity'):
self.modelfile += " self.preprocessing = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','"+preprocessing_pipe+"'))"
self.modelfile += '\\n'
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'), custom_objects={'cosine_distance': cosine_distance, 'cos_dist_output_shape': cos_dist_output_shape})"
self.modelfile += '\\n'
elif(learner_type in ['similarityIdentification','contextualSearch']):
if scoreParam == 'VectorDB Cosine':
vectorfiledbname = 'trainingdataVecDB'
self.modelfile += f"\\
\\n persist_directory = os.path.join(os.path.dirname(__file__),'..','data')\\
\\n client = chromadb.PersistentClient(path=persist_directory)\\
\\n self.collection_name = '{vectorfiledbname}'\\
\\n self.collection = client.get_collection(self.collection_name)\\n"
else:
self.modelfile += " self.train_input = pd.read_csv(os.path.join(os.path.dirname(__file__),'..','data','trainingdata.csv'))\\n\\n"
elif(learner_type == 'ImageClassification'):
self.modelfile += ' self.config='+str(imageconfig)
self.modelfile += '\\n'
if(modelName.lower() == 'densenet'):
self.modelfile += ' baseModel = tf.keras.applications.DenseNet121(weights="imagenet", include_top=False, input_tensor=Input(shape=(self.config[\\'img_width\\'],self.config[\\'img_height\\'],self.config[\\'img_channel\\'])))'
else:
self.modelfile += ' baseModel = tensorflow.keras.applications.InceptionV3(weights="imagenet", include_top=False, input_tensor=Input(shape=(self.config[\\'img_width\\'],self.config[\\'img_height\\'],self.config[\\'img_channel\\'])))'
self.modelfile += '\\n'
self.modelfile += ' headModel = baseModel.output'
self.modelfile += '\\n'
self.modelfile += ' headModel = Flatten(name="flatten")(headModel)'
self.modelfile += '\\n'
self.modelfile += ' headModel = Dense(1024, activation=\\'relu\\')(headModel)'
self.modelfile += '\\n'
self.modelfile += ' headModel = Dropout(0.5)(headModel)'
self.modelfile += '\\n'
self.modelfile += ' headModel = Dense(2, activation=\\'sigmoid\\')(headModel)'
self.modelfile += '\\n'
self.modelfile += ' headModel = self.model = Model(inputs=baseModel.input, outputs=headModel)'
self.modelfile += '\\n'
self.modelfile += ' opt = Adam(lr=self.config[\\'lr\\'])'
self.modelfile += '\\n'
self.modelfile += ' self.model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])'
self.modelfile += '\\n'
self.modelfile += " self.model.load_weights(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\\n'
elif(learner_type == 'objectDetection'):
self.modelfile += " self.MODEL_LOCATION = os.path.join(os.path.dirname(__file__),'..','model')\\n"
self.modelfile += ' PATH_TO_CFG = self.MODEL_LOCATION+"/export/pipeline.config"\\n'
self.modelfile += ' PATH_TO_CKPT = self.MODEL_LOCATION+"/export/checkpoint/"\\n'
self.modelfile += ' PATH_TO_LABELS = self.MODEL_LOCATION+"/export/label_map.pbtxt"\\n'
self.modelfile += ' configs = config_util.get_configs_from_pipeline_file(PATH_TO_CFG)\\n'
self.modelfile += ' self.detection_model = model_builder.build(model_config=configs["model"], is_training=False)\\n'
self.modelfile += ' ckpt = tf.compat.v2.train.Checkpoint(model=self.detection_model)\\n'
self.modelfile += ' ckpt.restore(os.path.join(PATH_TO_CKPT, "ckpt-0")).expect_partial()\\n'
self.modelfile += ' self.category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS,\\
use_display_name=True)\\n'
elif learner_type == 'TS' and (modelName.lower() == 'lstm' or modelName.lower() == 'mlp'):
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\\n'
elif modelName.lower() == 'neural architecture search':
self.modelfile += ' import autokeras as ak'
self.modelfile += '\\n'
self.modelfile += " self.model = load_model(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'),custom_objects=ak.CUSTOM_OBJECTS)"
self.modelfile += '\\n'
else:
self.modelfile += " self.model = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','"+modelfile+"'))"
self.modelfile += '\\n'
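# create_init_function_for_classification() above only decides which loading snippet is
# emitted into the generated trained_model.py. In the default (classical ML) branch the
# generated __init__ reduces to roughly:
#   self.model = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','<modelfile>'))
# with <modelfile> substituted at generation time, while the Keras branches call
# load_model() with custom_objects and recompile when the scoring metric needs a custom
# function.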
def create_predict(self,learner_type,method,model,model_type,threshold,firstDocFeature,secondDocFeature,padding_length,optimizationmethod,sessonal_freq,additional_regressors,feature,modelFeatures,indexFeature,lag_order,scalertransformationFile,datetimeFeature,scoreParam=None):
self.modelfile += ' def predict(self,X,features_names):'
self.modelfile += '\\n'
if (learner_type == 'ML' and model_type.lower()=='anomaly_detection' and model.lower()=="autoencoder"):
self.modelfile += f" X=X[{feature}]\\n"
self.modelfile += f" X = np.asarray(X).astype('float32')\\n"
self.modelfile += f" reconstructed = self.model.predict(X)\\n"
self.modelfile += f" predict_loss = tf.keras.losses.mae(reconstructed,X)\\n"
self.modelfile += ' max_threshold = np.mean(predict_loss) + 2*np.std(predict_loss)\\n'
self.modelfile += ' min_threshold = np.mean(predict_loss) - 2*np.std(predict_loss)\\n'
self.modelfile += ' prediction_df = pd.DataFrame()\\n'
self.modelfile += ' prediction_df["loss"] = predict_loss\\n'
self.modelfile += ' prediction_df["max_threshold"] = max_threshold\\n |
'
self.modelfile += ' prediction_df["min_threshold"] = min_threshold\\n'
self.modelfile += ' prediction_df["anomaly"] = np.where((prediction_df["loss"] > prediction_df["max_threshold"]) | (prediction_df["loss"] <= prediction_df["min_threshold"]), True, False)\\n'
self.modelfile += ' return prediction_df\\n'
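            # The autoencoder branch above generates an unsupervised scoring routine: the
            # reconstruction error is the per-row MAE between the input and the model's
            # reconstruction, and the anomaly band is computed from the scored batch itself
            # (mean +/- 2 standard deviations of the loss). Rows whose loss falls outside
            # that band are flagged anomaly=True in the returned DataFrame.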
elif(learner_type == 'RecommenderSystem'):
self.modelfile += ' predictions = []'
self.modelfile += '\\n'
self.modelfile += ' for index,row in X.iterrows():'
self.modelfile += '\\n'
self.modelfile += ' score = self.model.predict(int(row["uid"]),int(row["iid"]))'
self.modelfile += '\\n'
self.modelfile += ' predictions.append(score.est)'
self.modelfile += '\\n'
self.modelfile += ' return predictions'
elif(learner_type in ['similarityIdentification','contextualSearch']):
tfeatures = list(modelFeatures.split(","))
if indexFeature != '' and indexFeature != 'NA':
ifeatures = indexFeature.split(",")
for ifes in ifeatures:
if ifes not in tfeatures:
tfeatures.append(ifes)
if model_type == 'BM25':
self.modelfile += f"\\n\\
tokenized_corpus =[doc.split(' ') for doc in self.train_input.tokenize]\\n\\
bm25 = BM25Okapi(tokenized_corpus)\\n\\
tokenized_query = [doc.split(' ') for doc in X.tokenize]\\n\\
logcnt = 5\\n\\
output = []\\n\\
for query in tokenized_query:\\n\\
doc_scores = bm25.get_scores(query)\\n\\
related_docs_indices = np.argsort(doc_scores)[::-1][:logcnt]\\n\\
x = self.train_input[{tfeatures}].loc[self.train_input.index[related_docs_indices]]\\n\\
x['Score'] = doc_scores[related_docs_indices]\\n\\
x['Score'] = round(x['Score'],2).astype(str)+'%'\\n\\
output.append(x)\\n\\
return output\\n"
elif scoreParam == 'VectorDB Cosine':
featuresVecDB = modelFeatures.split(",")
self.modelfile += ' logcnt = 5\\n'
self.modelfile += f" columns = {featuresVecDB}\\n"
self.modelfile += f"\\
\\n output = []\\
\\n for rowindex, row in X.iterrows():\\
\\n queryembedding = X.iloc[rowindex:rowindex+1].to_numpy()\\
\\n results = self.collection.query(\\
\\n query_embeddings=queryembedding.tolist(),\\
\\n n_results=logcnt\\
\\n )\\
\\n x = pd.DataFrame(columns=columns)\\
\\n for i in range(0, len(results['ids'][0])):\\
\\n documentAry = results['documents'][0][i]\\
\\n documentAry = documentAry.split(' ~&~ ')\\
\\n for j in range(0, len(documentAry)):\\
\\n x.at[i,columns[j]] = documentAry[j]\\
\\n x.at[i,'Score'] = results['distances'][0][i]\\
\\n output.append(x)\\
\\n return output"
else:
self.modelfile += ' columns = self.train_input.columns.tolist()\\n'
self.modelfile += ' logcnt = 5\\n'
self.modelfile += f" train_input = self.train_input[{tfeatures}]\\n"
for tf in tfeatures:
self.modelfile += f" columns.remove('{tf}')\\n"
self.modelfile += f"\\
\\n results = cosine_similarity(self.train_input[columns],X)\\
\\n output = []\\
\\n for i in range(results.shape[1]):\\
\\n related_docs_indices = results[:,i].argsort(axis=0)[:-(int(logcnt) + 1):-1]\\
\\n x=self.train_input[{tfeatures}].loc[self.train_input.index[related_docs_indices]]\\
\\n scores = []\\
\\n for j in range(0,logcnt):\\
\\n scores.append(str(round((results[related_docs_indices][j][i])*100))+'%')\\
\\n x['Score'] = scores\\
\\n output.append(x)\\
\\n return output"
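            # Summary of the three retrieval strategies generated above for similarity and
            # contextual search: BM25 ranks with rank_bm25.BM25Okapi over the tokenized
            # training corpus, 'VectorDB Cosine' queries a persisted chromadb collection
            # with the request embeddings, and the default path applies scikit-learn's
            # cosine_similarity against the stored training frame. Each variant returns the
            # top logcnt (5) matches per input row together with a Score column.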
elif(learner_type == 'Text Similarity'):
self.modelfile += ' X["'+firstDocFeature+'"] = X["'+firstDocFeature+'"].astype(str)'
self.modelfile += '\\n'
self.modelfile += ' X["'+secondDocFeature+'"] = X["'+secondDocFeature+'"].astype(str)'
self.modelfile += '\\n'
self.modelfile += ' test_sentence1 = self.preprocessing.texts_to_sequences(X["'+firstDocFeature+'"].values)'
self.modelfile += '\\n'
self.modelfile += ' test_sentence2 = self.preprocessing.texts_to_sequences(X["'+secondDocFeature+'"].values)'
self.modelfile += '\\n'
self.modelfile += ' test_sentence1 = pad_sequences(test_sentence1, maxlen='+str(padding_length)+', padding=\\'post\\')'
self.modelfile += '\\n'
self.modelfile += ' test_sentence2 = pad_sequences(test_sentence2, maxlen='+str(padding_length)+', padding=\\'post\\')'
self.modelfile += '\\n'
self.modelfile += ' prediction = self.model.predict([test_sentence1, test_sentence2 ])'
self.modelfile += '\\n'
self.modelfile += ' return(prediction)'
self.modelfile += '\\n'
elif(learner_type == 'ImageClassification'):
self.modelfile += ' predictions = []'
self.modelfile += '\\n'
self.modelfile += ' for index, row in X.iterrows(): '
self.modelfile += '\\n'
self.modelfile += ' img = cv2.imread(row[\\'imagepath\\'])'
self.modelfile += '\\n'
self.modelfile += ' img = cv2.resize(img, (self.config[\\'img_width\\'],self.config[\\'img_height\\']))'
self.modelfile += '\\n'
self.modelfile += ' img = image.img_to_array(img)'
self.modelfile += '\\n'
self.modelfile += ' img = np.expand_dims(img, axis=0)'
self.modelfile += '\\n'
self.modelfile += ' img = img/255'
self.modelfile += '\\n'
self.modelfile += ' prediction = self.model.predict(img)'
self.modelfile += '\\n'
self.modelfile += ' prediction = np.argmax(prediction,axis=1)'
self.modelfile += '\\n'
self.modelfile += ' predictions.append(prediction[0])'
self.modelfile += '\\n'
self.modelfile += ' return(predictions)'
self.modelfile += '\\n'
elif(learner_type == 'objectDetection'):
self.modelfile += ' @tf.function\\n'
self.modelfile += ' def detect_fn(image):\\n'
self.modelfile += ' image, shapes = self.detection_model.preprocess(image)\\n'
self.modelfile += ' prediction_dict = self.detection_model.predict(image, shapes)\\n'
self.modelfile += ' detections = self.detection_model.postprocess(prediction_dict, shapes)\\n'
self.modelfile += ' return detections\\n'
self.modelfile += ' def load_image_into_numpy_array(path):\\n'
self.modelfile += ' return np.array(Image.open(path))\\n'
self.modelfile += ' imageLocation = []\\n'
self.modelfile += ' for i, row in X.iterrows():\\n'
self.modelfile += ' if ("confidance" in row) and row["confidance"] <= 1.0:\\n'
self.modelfile += ' confidance = row["confidance"]\\n'
self.modelfile += ' else:\\n'
self.modelfile += ' confidance = 0.8\\n'
self.modelfile += ' imageName = str(Path(row["imagepath"]).stem)+"_output"+str(Path(row["imagepath"]).suffix)\\n'
self.modelfile += ' image_np = load_image_into_numpy_array(row["imagepath"])\\n'
self.modelfile += ' input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)\\n'
self.modelfile += ' detections = detect_fn(input_tensor)\\n'
self.modelfile += ' num_detections = int(detections.pop("num_detections"))\\n'
self.modelfile += ' detections = {key: value[0, :num_detections].numpy()\\n\\
for key, value in detections.items()}\\n'
self.modelfile += ' detections["num_detections"] = num_detections\\n'
self.modelfile += ' detections["detection_classes"] = detections["detection_classes"].astype(np.int64)\\n'
self.modelfile += ' label_id_offset = 1\\n'
self.modelfile += ' image_np_with_detections = image_np.copy()\\n'
self.modelfile += ' viz_utils.visualize_boxes_and_labels_on_image_array(\\n\\
image_np_with_detections,\\n\\
detections["detection_boxes"],\\n\\
detections["detection_classes"]+label_id_offset,\\n\\
detections["detection_scores"],\\n\\
self.category_index,\\n\\
use_normalized_coordinates=True,\\n\\
max_boxes_to_draw=200,\\n\\
min_score_thresh=confidance,\\n\\
agnostic_mode=False)\\n'
self.modelfile += ' plt.figure()\\n'
self.modelfile += ' plt.imsave(os.path.join(self.MODEL_LOCATION,imageName), image_np_with_detections)\\n'
self.modelfile += ' imageLocation.append(os.path.join(self.MODEL_LOCATION,imageName))\\n'
self.modelfile += ' plt.show()\\n'
self.modelfile += ' return imageLocation\\n'
else:
if(learner_type == 'DL' and model != 'Neural Network'):
self.modelfile += ' X = np.expand_dims(X, axis=2)'
self.modelfile += '\\n'
if(learner_type == 'TextDL'):
self.modelfile += ' return pd.DataFrame(np.argmax(self.model.predict(X),axis=1))'
self.modelfile += '\\n'
elif(learner_type == 'TextML'):
self.modelfile += ' return pd.DataFrame(self.model.predict_proba(X),columns=self.model.classes_)'
self.modelfile += '\\n'
elif(learner_type == 'DL' and model_type == 'Classification'):
self.modelfile += ' X = X.astype(np.float32)'
self.modelfile += '\\n'
self.modelfile += ' return pd.DataFrame(np.argmax(self.model.predict(X),axis=1))'
self.modelfile += '\\n'
else:
if(model_type == 'Classification' or model_type == 'TLClassification'):
if model == 'Neural Architecture Search':
self.modelfile += ' X = X.astype(np.float32)'
self.modelfile += '\\n'
self.modelfile += ' return pd.DataFrame(self.model.predict(X))'
self.modelfile += '\\n'
else:
if optimizationmethod == 'genetic':
self.modelfile += '\\n'
self.modelfile += ' try:'
self.modelfile += '\\n'
self.modelfile += ' return pd.DataFrame(self.model.predict_proba(X))'
self.modelfile += '\\n'
self.modelfile += ' except:'
self.modelfile += '\\n'
self.modelfile += ' return pd.DataFrame(self.model.predict(X))'
else:
self.modelfile += ' X = X.astype(np.float32)'
self.modelfile += '\\n'
if model.lower() == 'deep q network' or model.lower() == 'dueling deep q network':
self.modelfile += ' q, _ = self.model(np.array(X), step_type=constant([time_step.StepType.FIRST] * np.array(X).shape[0]), training=False)'
self.modelfile += '\\n'
self.modelfile += ' return pd.DataFrame(q.numpy())'
else:
self.modelfile += ' return pd.DataFrame(self.model.predict_proba(X), columns=self.model.classes_)'
self.modelfile += '\\n'
elif model_type == 'Regression' and model == 'NAS':
self.modelfile += \\
"""
X = X.astype(np.float32)
return self.model.predict(X)
"""
elif(learner_type == 'TS'):
if model.lower() == 'fbprophet':
self.modelfile += ' sessonal_freq="'+str(sessonal_freq)+'"'
self.modelfile += '\\n'
self.modelfile += ' ts_prophet_future = self.model.make_future_dataframe(periods=int(X["noofforecasts"][0]),freq=sessonal_freq,include_history = False)'
self.modelfile += '\\n'
if (additional_regressors):
self.modelfile += '\\n'
                self.modelfile += ' additional_regressors='+str(additional_regressors)
self.modelfile += '\\n'
self.modelfile += ' ts_prophet_future[additional_regressors] = dataFrame[additional_regressors]'
self.modelfile += '\\n'
|
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from importlib.metadata import version
import sys
class importModule():
def __init__(self):
self.importModule = {}
self.stdlibModule = []
self.localModule = {}
def addLocalModule(self,module, mod_from=None, mod_as=None):
if module == '*':
if module not in self.localModule.keys():
self.localModule[module]= [mod_from]
else:
self.localModule[module].append(mod_from)
elif module not in self.localModule.keys():
self.localModule[module] = {'from':mod_from, 'as':mod_as}
def addModule(self, module, mod_from=None, mod_as=None):
if module not in self.importModule.keys():
self.importModule[module] = {'from':mod_from, 'as':mod_as}
if module in sys.stdlib_module_names:
self.stdlibModule.append(module)
elif isinstance(self.importModule[module], list):
if mod_as not in [x['as'] for x in self.importModule[module]]:
self.importModule[module].append({'from':mod_from, 'as':mod_as})
elif mod_as not in [x['from'] for x in self.importModule[module]]:
self.importModule[module].append({'from':mod_from, 'as':mod_as})
elif mod_as != self.importModule[module]['as']:
as_list = [self.importModule[module]]
as_list.append({'from':mod_from, 'as':mod_as})
self.importModule[module] = as_list
elif mod_from != self.importModule[module]['from']:
as_list = [self.importModule[module]]
as_list.append({'from':mod_from, 'as':mod_as})
self.importModule[module] = as_list
def getModules(self):
return (self.importModule, self.stdlibModule)
def getBaseModule(self, extra_importers=[]):
modules_alias = { 'sklearn':'scikit-learn',
'genetic_selection':'sklearn-genetic',
'google': 'google-cloud-storage',
'azure':'azure-storage-file-datalake'}
local_modules = {'AIX':'/app/AIX-0.1-py3-none-any.whl'}
modules = []
require = ""
if extra_importers:
extra_importers = [importer.importModule for importer in extra_importers if isinstance(importer, importModule)]
importers_module = [self.importModule] + extra_importers
for importer_module in importers_module:
for k,v in importer_module.items():
if v['from']:
mod = v['from'].split('.')[0]
else:
mod = k
if mod in modules_alias.keys():
mod = modules_alias[mod]
modules.append(mod)
modules = list(set(modules))
for mod in modules:
try:
if mod in local_modules.keys():
require += f"{local_modules[mod]}\\n"
else:
require += f"{mod}=={version(mod)}\\n"
except :
if mod not in sys.stdlib_module_names:
raise
return require
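    # getBaseModule() above turns the collected imports into a requirements.txt style
    # string: each top level module is mapped to its pip name via modules_alias (for
    # example the import name 'sklearn' becomes 'scikit-learn') and pinned with
    # importlib.metadata.version(); stdlib modules without distribution metadata are
    # skipped. Illustrative output (versions depend on the local environment):
    #   scikit-learn==1.2.2
    #   pandas==1.5.3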
def getCode(self):
def to_string(k, v):
mod = ''
if v['from']:
mod += 'from {} '.format(v['from'])
mod += 'import {}'.format(k)
if v['as']:
mod += ' as {} '.format(v['as'])
return mod
modules = ""
local_modules = ""
std_lib_modules = ""
third_party_modules = ""
for k,v in self.importModule.items():
if k in self.stdlibModule:
std_lib_modules = std_lib_modules + '\\n' + to_string(k, v)
elif isinstance(v, dict):
third_party_modules = third_party_modules + '\\n' + to_string(k, v)
elif isinstance(v, list):
for alias in v:
third_party_modules = third_party_modules + '\\n' + to_string(k, alias)
for k,v in self.localModule.items():
if k != '*':
local_modules = local_modules + '\\n' + to_string(k, v)
else:
for mod_from in v:
local_modules = local_modules + '\\n' + f'from {mod_from} import {k}'
if std_lib_modules:
modules = modules + "\\n#Standard Library modules" + std_lib_modules
if third_party_modules:
modules = modules + "\\n\\n#Third Party modules" + third_party_modules
if local_modules:
modules = modules + "\\n\\n#local modules" + local_modules + '\\n'
return modules
def copyCode(self, importer):
self.importModule, self.stdlibModule = importer.getModules()
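# Illustrative usage of importModule (the names below are examples, not part of this module):
#   imp = importModule()
#   imp.addModule('json')                      # stdlib, grouped under '#Standard Library modules'
#   imp.addModule('numpy', mod_as='np')        # third party, emitted as 'import numpy as np'
#   imp.addModule('Path', mod_from='pathlib')  # emitted as 'from pathlib import Path'
#   code_header = imp.getCode()                # grouped import statements as a string
#   requirements = imp.getBaseModule()         # pinned pip requirements for the third party imports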
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os,sys
import platform
import json
import shutil
import logging
from pathlib import Path
from prediction_package import production
from prediction_package import prediction_transformation as cs
class DeploymentManager:
def __init__(self):
self.requirementfile=''
self.modelfile=''
self.s2i_environmentfile=''
self.selectorfile=''
self.profilerfile=''
self.readmepackagename=''
self.pythonpackage=''
self.log = logging.getLogger('eion')
def include_import_file(self,learner_type,method,scoreParam,model_type,model):
if((learner_type == 'DL') or (learner_type == 'TextDL')):
self.modelfile += 'from tensorflow.keras.models import load_model'
self.modelfile += '\\n'
self.modelfile += 'from tensorflow.keras import backend as K'
self.modelfile += '\\n'
self.modelfile += 'import tensorflow as tf'
self.modelfile += '\\n'
if (learner_type == 'ML' and model_type.lower()=='anomaly_detection' and model.lower() == 'autoencoder'):
self.modelfile += 'import joblib'
self.modelfile += '\\n'
self.modelfile += 'import os'
self.modelfile += '\\n'
self.modelfile += 'import pandas as pd'
self.modelfile += '\\n'
self.modelfile += 'import numpy as np'
self.modelfile += '\\n'
self.modelfile += 'from pathlib import Path'
self.modelfile += '\\n'
self.modelfile += 'import tensorflow as tf'
self.modelfile += '\\n'
self.modelfile += 'from keras.models import load_model'
self.modelfile += '\\n'
self.modelfile += 'import warnings'
self.modelfile += '\\n'
self.modelfile += 'from sklearn.preprocessing import StandardScaler'
self.modelfile += '\\n'
self.modelfile += 'warnings.filterwarnings("ignore")'
self.modelfile += '\\n'
if(learner_type == 'ImageClassification'):
self.modelfile += 'import os'
self.modelfile += '\\n'
self.modelfile += 'from tensorflow.keras.models import Sequential'
self.modelfile += '\\n'
self.modelfile += 'from tensorflow.keras.layers import Dense, Dropout, Flatten'
self.modelfile += '\\n'
self.modelfile += 'from tensorflow.keras.preprocessing import image'
self.modelfile += '\\n'
self.modelfile += 'import numpy as np'
self.modelfile += '\\n'
self.modelfile += 'import tensorflow as tf'
self.modelfile += '\\n'
self.modelfile += 'from tensorflow.keras.layers import Input'
self.modelfile += '\\n'
self.modelfile += 'from tensorflow.keras.models import Model'
self.modelfile += '\\n'
self.modelfile += 'from tensorflow.keras.optimizers import Adam'
self.modelfile += '\\n'
self.modelfile += 'import cv2'
self.modelfile += '\\n'
if(learner_type == 'objectDetection'):
self.modelfile += 'import os\\n'
self.modelfile += 'from object_detection.utils import label_map_util\\n'
self.modelfile += 'from object_detection.utils import config_util\\n'
self.modelfile += 'from object_detection.utils import visualization_utils as viz_utils\\n'
self.modelfile += 'from object_detection.builders import model_builder\\n'
self.modelfile += 'import tensorflow as tf\\n'
self.modelfile += 'import numpy as np\\n'
self.modelfile += 'from PIL import Image\\n'
self.modelfile += 'import matplotlib.pyplot as plt\\n'
self.modelfile += 'import pandas as pd\\n'
self.modelfile += 'from pathlib import Path\\n'
if(learner_type == 'Text Similarity'):
self.modelfile += 'from tensorflow.keras.models import load_model'
self.modelfile += '\\n'
self.modelfile += 'from tensorflow.keras import backend as K'
self.modelfile += '\\n'
self.modelfile += 'from tensorflow.keras.preprocessing.sequence import pad_sequences'
self.modelfile += '\\n'
self.modelfile += 'from tensorflow.keras.preprocessing.text import Tokenizer'
self.modelfile += '\\n'
self.modelfile += 'import tensorflow as tf'
self.modelfile += '\\n'
if(model == 'Neural Architecture Search'):
self.modelfile += 'from tensorflow.keras.models import load_model'
self.modelfile += '\\n'
self.modelfile += 'from tensorflow.keras import backend as K'
self.modelfile += '\\n'
self.modelfile += 'import tensorflow as tf'
self.modelfile += '\\n'
self.modelfile += 'import joblib'
self.modelfile += '\\n'
self.modelfile += 'import os'
self.modelfile += '\\n'
self.modelfile += 'import pandas as pd'
self.modelfile += '\\n'
self.modelfile += 'from sklearn.decomposition import LatentDirichletAllocation\\n'
self.modelfile += 'import numpy as np\\n'
self.modelfile += 'from pathlib import Path\\n'
if model.lower() == 'deep q network' or model.lower() == 'dueling deep q network':
self.modelfile += 'from tensorflow import constant'
self.modelfile += '\\n'
self.modelfile += 'from tf_agents.trajectories import time_step'
self.modelfile += '\\n'
self.requirementfile += 'tensorflow==2.5.0'
if model.lower() == 'lstm' or model.lower() == 'mlp':
self.modelfile += 'from tensorflow.keras.models import load_model'
self.modelfile += '\\n'
self.requirementfile += 'tensorflow==2.5.0'
if(learner_type == 'Text Similarity'):
self.modelfile += 'def cosine_distance(vests):'
self.modelfile += '\\n';
self.modelfile += ' x, y = vests'
self.modelfile += '\\n';
self.modelfile += ' x = K.l2_normalize(x, axis=-1)'
self.modelfile += '\\n';
self.modelfile += ' y = K.l2_normalize(y, axis=-1)'
self.modelfile += '\\n';
self.modelfile += ' return -K.mean(x * y, axis=-1, keepdims=True)'
self.modelfile += '\\n';
self.modelfile += 'def cos_dist_output_shape(shapes):'
self.modelfile += '\\n';
self.modelfile += ' shape1, shape2 = shapes'
self.modelfile += '\\n';
self.modelfile += ' return (shape1[0],1)'
self.modelfile += '\\n';
if(learner_type == 'TextDL' or learner_type == 'DL'):
if(scoreParam.lower() == 'recall' or scoreParam.lower() == 'f1_score'):
self.modelfile += 'def recall_m(y_true, y_pred):'
self.modelfile += '\\n';
self.modelfile += ' true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))'
self.modelfile += '\\n';
self.modelfile += ' possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))'
self.modelfile += '\\n';
self.modelfile += ' recall = true_positives / (possible_positives + K.epsilon())'
self.modelfile += '\\n';
self.modelfile += ' return recall'
self.modelfile += '\\n';
if(scoreParam.lower() == 'precision' or scoreParam.lower() == 'f1_score'):
self.modelfile += 'def precision_m(y_true, y_pred):'
self.modelfile += '\\n';
self.modelfile += ' true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))'
self.modelfile += '\\n';
self.modelfile += ' predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))'
self.modelfile += '\\n';
self.modelfile += ' precision = true_positives / (predicted_positives + K.epsilon())'
self.modelfile += '\\n';
self.modelfile += ' return precision'
self.modelfile += '\\n';
if(scoreParam.lower() == 'f1_score'):
self.modelfile += 'def f1_m(y_true, y_pred):'
self.modelfile += '\\n';
self.modelfile += ' precision = precision_m(y_true, y_pred)'
self.modelfile += '\\n';
self.modelfile += ' recall = recall_m(y_true, y_pred)'
            self.modelfile += '\\n';
self.modelfile += ' return 2*((precision*recall)/(precision+recall+K.epsilon()))'
self.modelfile += '\\n';
if(scoreParam.lower() == 'rmse'):
self.modelfile += 'def rmse_m(y_true, y_pred):'
self.modelfile += '\\n';
self.modelfile += ' return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))'
self.modelfile += '\\n';
if(scoreParam.lower() =='r2'):
self.modelfile += 'def r_square(y_true, y_pred):'
self.modelfile += '\\n';
self.modelfile += ' SS_res = K.sum(K.square(y_true-y_pred))'
self.modelfile += '\\n';
self.modelfile += ' SS_tot = K.sum(K.square(y_true-K.mean(y_true)))'
self.modelfile += '\\n';
self.modelfile += ' return (1 - SS_res/(SS_tot+K.epsilon()))'
self.modelfile += '\\n';
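        # The helpers emitted above (recall_m, precision_m, f1_m, rmse_m, r_square) mirror
        # the custom Keras metrics used at training time; they are written into the
        # generated trained_model.py so that the load_model(..., custom_objects=...) and
        # recompile calls produced by create_init_function_for_classification can resolve
        # them when the deployed model is loaded.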
if(learner_type.lower() in ['similarityidentification','contextualsearch']):
self.modelfile += 'from pathlib import Path\\n'
if model_type == 'BM25':
self.modelfile += 'from rank_bm25 import BM25Okapi\\n'
elif scoreParam == 'VectorDB Cosine':
self.modelfile += 'import chromadb\\n'
else:
self.modelfile += 'from sklearn.metrics.pairwise import cosine_similarity\\n'
        self.pythonpackage += '========== Python Packages Required =========='
self.pythonpackage += '\\n'
self.pythonpackage += 'scikit-learn'
self.pythonpackage += '\\n'
self.pythonpackage += 'scipy'
self.pythonpackage += '\\n'
self.pythonpackage += 'numpy'
self.pythonpackage += '\\n'
if((learner_type == 'DL') or (learner_type =='TextDL')):
self.modelfile += 'import numpy as np'
self.modelfile += '\\n'
self.requirementfile += 'scikit-learn==0.21.3'
self.requirementfile += '\\n'
self.requirementfile += 'scipy==1.3.3'
self.requirementfile += '\\n'
self.requirementfile += 'numpy==1.17.4'
self.requirementfile += '\\n'
if(learner_type == 'TextML'):
self.requirementfile += 'spacy==2.2.3'
self.requirementfile += '\\n'
self.requirementfile += 'https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.2.0/en_core_web_sm-2.2.0.tar.gz'
self.requirementfile += '\\n'
if(learner_type == 'DL' or learner_type == 'TextDL'):
self.requirementfile += 'keras==2.3.1'
self.requirementfile += '\\n'
self.requirementfile += 'tensorflow==2.0.0b1'
self.requirementfile += '\\n'
if(learner_type == 'RecommenderSystem'):
self.requirementfile += 'surprise'
self.requirementfile += '\\n'
if(method == 'package'):
self.modelfile += 'import surprise'
self.modelfile += '\\n'
self.modelfile += 'import statsmodels'
self.modelfile += '\\n'
self.requirementfile += 'statsmodels==0.10.2'
self.requirementfile += '\\n'
def crate_readme_file(self,deploy_path,modelfile,features,method,single_file=False):
        self.readme='========== File Structure =========='
self.readme+='\\n'
self.readme+=modelfile+' ------ Trained Model'
self.readme+='\\n'
self.readme+='aion_prediction.py --> Python package entry point'
self.readme+='\\n'
if not single_file:
self.readme+='script/inputprofiler.py --> Profiling like FillNA and Category to Numeric'
self.readme+='\\n'
self.readme+='script/selector.py --> Feature Selection'
self.readme+='\\n'
self.readme+='script/trained_model.py --> Read the model file and call the prediction'
self.readme+='\\n'
self.readme+='script/output_format.py --> Output formatter file'
self.readme+='\\n'
self.readme+= self.pythonpackage
self.readme+= '========== How to call the model =========='
self.readme+='\\n'
self.readme+= '============== From Windows Terminal =========='
self.readme+='\\n'
if method == 'optimus_package':
self.readme += 'python aion_prediction.py filename.json'
self.readme +='\\n'
self.readme += '========== Embedded Methods =========='
self.readme +='\\n'
self.readme += 'Function Name: predict_from_json - When input is Json Data'
self.readme +='\\n'
self.readme += 'Function Name: predict_from_file - When input is Json File'
self.readme +='\\n'
else:
callpython = 'python aion_prediction.py "[{'
for x in features:
            if(callpython != 'python aion_prediction.py "[{'):
callpython += ','
callpython += '\\\\\\"'+str(x)+'\\\\\\"'+':'+'\\\\\\"'+str(x)+'_value'+'\\\\\\"'
callpython += '}]"'
self.readme += callpython
self.readme+='\\n'
self.readme+= '============== From Linux Terminal =========='
self.readme+='\\n'
callpython = 'python aion_prediction.py \\'[{'
temp =callpython
for x in features:
if(callpython != temp):
callpython += ','
callpython += '"'+str(x)+'"'+':'+'"'+str(x)+'_value'+'"'
callpython += '}]\\''
self.readme += callpython
self.readme+='\\n'
self.readme+= '============== Output =========='
self.readme+='\\n'
self.readme+= '{"status":"SUCCESS","data":[{"Data1":"Value","prediction":"Value"}]}' ## For Single Row/Record'
self.readme+='\\n'
self.readme+= '{"status":"SUCCESS","data":[{"Data1":"Value","prediction":"Value"},{"Data1":"Value","prediction":"Value"}]} ## For Multiple Row/Record'
self.readme+='\\n'
self.readme+= '{"status":"ERROR","message":"description"} ## In Case Exception or Error'
self.readme+='\\n'
#print(self.readme)
filename = os.path.join(deploy_path,'readme.txt')
self.log.info('-------> Readme File Location: '+filename)
f = open(filename, "wb")
f.write(str(self.readme).encode('utf8'))
f.close()
def create_class(self,classname):
#self.modelfile += 'class '+classname+'(object):'
self.modelfile += 'class trained_model(object):'
self.modelfile += '\\n'
def profiler_code(self,model_type,model,output_columns, features, text_feature,wordToNumericFeatures=[], deploy={},datetimeFeature=''):
profiler = deploy.get('profiler',{})
if isinstance(features, str):
features = features.split(',')
code = f"""
import scipy
import joblib
import numpy as np
import pandas as pd
from pathlib import Path
"""
if text_feature:
code += """
import importlib.util\\n"""
if wordToNumericFeatures:
code += """
from word2number import w2n
def s2n(value):
try:
x=eval(value)
return x
except:
try:
return w2n.word_to_num(value)
except:
return np.nan
"""
if 'code' in deploy.get('preprocess',{}).keys():
code += deploy['preprocess']['code']
if profiler.get('conversion_method','').lower() == 'glove':
code += """
class inputprofiler(object):
def __init__(self):
self.model = None
preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl'
if preprocess_path.exists():
self.model = joblib.load(preprocess_path)
from text.Embedding import load_pretrained
from text import TextProcessing
model_path = TextProcessing.checkAndDownloadPretrainedModel('glove')
embed_size, loaded_model = load_pretrained(model_path)
self.model.set_params(text_process__vectorizer__external_model = loaded_model)
else:
raise ValueError('Preprocess model not found')
def apply_profiler(self,df):
df = df.replace(r'^\\s*$', np.NaN, regex=True)
"""
elif profiler.get('conversion_method','').lower() == 'fasttext':
code += """
def get_pretrained_model_path():
try:
from AION.appbe.dataPath import DATA_DIR
modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing'
except:
modelsPath = Path('aion')/'PreTrainedModels'/'TextProcessing'
if not modelsPath.exists():
modelsPath.mkdir(parents=True, exist_ok=True)
return modelsPath
class inputprofiler(object):
def __init__(self):
self.model = None
preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl'
if preprocess_path.exists():
self.model = joblib.load(preprocess_path)
if not importlib.util.find_spec('fasttext'):
raise ValueError('fastText not installed')
else:
import os
import fasttext
import fasttext.util
cwd = os.getcwd()
os.chdir(get_pretrained_model_path())
fasttext.util.download_model('en', if_exists='ignore')
loaded_model = fasttext.load_model('cc.en.300.bin')
os.chdir(cwd)
self.model.set_params(text_process__vectorizer__external_model = loaded_model)
self.model.set_params(text_process__vectorizer__external_model_type = 'binary')
else:
raise ValueError('Preprocess model not found')
def apply_profiler(self,df):
df = df.replace(r'^\\s*$', np.NaN, regex=True)
"""
else:
code += """
class inputprofiler(object):
def __init__(self):
self.model = None
preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl'
if preprocess_path.exists():
self.model = joblib.load(preprocess_path)
else:
raise ValueError('Preprocess model not found')
def apply_profiler(self,df):
df = df.replace(r'^\\s*$', np.NaN, regex=True)
"""
if 'code' in deploy.get('preprocess',{}).keys():
code += " df = preprocess( df)\\n"
if wordToNumericFeatures:
code += f"""
df[{wordToNumericFeatures}] = df[{wordToNumericFeatures}].apply(lambda x: s2n(x))"""
if profiler.get('unpreprocessed_columns'):
code += f"""
unpreprocessed_data = df['{profiler['unpreprocessed_columns'][0]}']
df.drop(['{profiler['unpreprocessed_columns'][0]}'], axis=1,inplace=True)
"""
if profiler.get('force_numeric_conv'):
code += f"""
df[{profiler['force_numeric_conv']}] = df[{profiler['force_numeric_conv']}].apply(pd.to_numeric,errors='coerce')
"""
code += f"""
if self.model:
df = self.model.transform(df)"""
code += f"""
columns = {output_columns}
if isinstance(df, scipy.sparse.spmatrix):
df = pd.DataFrame(df.toarray(), columns=columns)
else:
df = pd.DataFrame(df, columns=columns)
"""
        ##The below check avoids storing the unpreprocessed column variable, as it is not used for anomaly detection
if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na':
pass
else:
if profiler.get('unpreprocessed_columns'):
code += f"""
df['{profiler.get('unpreprocessed_columns')[0]}'] = unpreprocessed_data
"""
if model_type.lower() == 'anomaly_detection' and datetimeFeature != '' and datetimeFeature.lower() != 'na':
            ##The set_index below is wrong because datetimeFeature is dropped before profiling, so it is commented out.
# code += f"""
# df.set_index('{datetimeFeature}', inplace=True)"""
code += f"""
return(df,'{datetimeFeature}')\\n"""
else:
code += f"""
return(df)"""
return code
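    # profiler_code() returns the source of the generated inputprofiler class. Its
    # apply_profiler() roughly performs, in order: blank string to NaN replacement, the
    # optional user preprocess() hook, word to number conversion, setting aside any
    # 'unpreprocessed' column, forced numeric coercion, transformation through the saved
    # preprocess_pipe.pkl, and rebuilding a DataFrame with the expected output columns
    # (also returning the datetime feature name for anomaly detection runs).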
def no_profiling_code(self, features):
if isinstance(features, str):
features = features.split(',')
return f"""
import pandas as pd
import numpy as np
class inputprofiler(object):
def apply_profiler(self,df):
df = df.replace(r'^\\s*$', np.NaN, regex=True)
return df[{features}]
"""
def create_profiler_file(self,learner_type,deploy_path,profiler,features,numericToLabel_json,column_merge_flag,text_features,preprocessing_pipe,firstDocFeature,secondDocFeature,normalizer,normFeatures,wordToNumericFeatures,conversion_method,model_type,preprocess_pipe,preprocess_out_columns, label_encoder,model, config=None,datetimeFeature=''):
filename = str(Path(deploy_path)/'script'/'inputprofiler.py')
if 'profiler' in config:
if model_type == 'BM25':
code = self.profiler_code(model_type,model,['tokenize'],features, text_features,config['profiler']['word2num_features'])
elif model == 'KaplanMeierFitter':
code = self.no_profiling_code(features)
elif model.lower() in ['arima', 'fbprophet']: #task 12627
                code = self.no_profiling_code('noofforecasts')
else:
code = self.profiler_code(model_type,model,config['profiler']['output_features'],features, text_features,config['profiler']['word2num_features'],config,datetimeFeature)
if code:
with open(filename,'w',encoding="utf-8") as f:
f.write(code)
self.log.info('-------> Profiler File Location :'+filename)
return
self.profilerfile += 'import pandas as pd'
self.profilerfile += '\\n'
self.profilerfile += 'import joblib'
self.profilerfile += '\\n'
self.profilerfile += 'import os'
self.profilerfile += '\\n'
self.profilerfile += 'from word2number import w2n'
self.profilerfile += '\\n'
self.profilerfile += 'import numpy as np'
self.profilerfile += '\\nfrom pathlib import Path\\n'
#print("1")
#print(profiler)
if(learner_type == 'Text Similarity' or len(text_features) > 0):
self.profilerfile += 'from text import TextProcessing'
self.profilerfile += '\\n'
self.profilerfile += 'def textCleaning(textCorpus):'
self.profilerfile += '\\n'
self.profilerfile += ' textProcessor = TextProcessing.TextProcessing()'
self.profilerfile += '\\n'
self.profilerfile += ' textCorpus = textProcessor.transform(textCorpus)'
self.profilerfile += '\\n'
self.profilerfile += ' return(textCorpus)'
self.profilerfile += '\\n'
self.profilerfile += 'class inputprofiler(object):'
self.profilerfile += '\\n'
self.profilerfile += ' def s2n(self,value):'
self.profilerfile += '\\n'
self.profilerfile += ' try:'
self.profilerfile += '\\n'
self.profilerfile += ' x=eval(value)'
self.profilerfile += '\\n'
self.profilerfile += ' return x'
self.profilerfile += '\\n'
self.profilerfile += ' except:'
self.profilerfile += '\\n'
self.profilerfile += ' try:'
self.profilerfile += '\\n'
self.profilerfile += ' return w2n.word_to_num(value)'
self.profilerfile += '\\n'
self.profilerfile += ' except:'
self.profilerfile += '\\n'
self.profilerfile += ' return np.nan '
self.profilerfile += '\\n'
self.profilerfile += ' def apply_profiler(self,df):'
self.profilerfile += '\\n'
if(len(wordToNumericFeatures) > 0):
for w2nFeature in wordToNumericFeatures:
if w2nFeature not in features:
continue
self.profilerfile += " df['"+w2nFeature+"']=df['"+w2nFeature+"'].apply(lambda x: self.s2n(x))"
self.profilerfile += '\\n'
self.profilerfile += " df = df.replace(r'^\\s*$', np.NaN, regex=True)"
self.profilerfile += '\\n'
self.profilerfile += ' try:'
self.profilerfile += '\\n'
self.profilerfile += ' df.dropna(how="all",axis=1,inplace=True)'
self.profilerfile += '\\n'
self.profilerfile += ' except:'
self.profilerfile += '\\n'
self.profilerfile += ' df.fillna(0)'
self.profilerfile += '\\n'
if model_type.lower() != 'timeseriesforecasting': #task 11997
self.profilerfile += ' preprocess_path = Path(__file__).parent.parent/"model"/"preprocess_pipe.pkl"\\n'
self.profilerfile += ' if preprocess_path.exists():\\n'
self.profilerfile += ' model = joblib.load(preprocess_path)\\n'
if model_type.lower()=='anomaly_detection' and model.lower() == 'autoencoder':
self.profilerfile += f" df[{features}] = model.transform(df[{features}])\\n"
else:
self.profilerfile += f" df = model.transform(df)\\n"
if 'operation' in profiler:
y = profiler['operation']
for action in y:
feature = action['feature']
#if feature not in features:
# continue
operation = action['Action']
if(operation == 'Drop'):
self.profilerfile += " if '"+feature+"' in df.columns:"
self.profilerfile += '\\n'
self.profilerfile += " df.drop(columns=['"+feature+"'],inplace = True)"
self.profilerfile += '\\n'
if(operation == 'FillValue'):
self.profilerfile += " if '"+feature+"' in df.columns:"
self.profilerfile += '\\n'
fvalue = action['value']
self.profilerfile += " df['"+feature+"'] = df['"+feature+"'].fillna(value='"+fvalue+"')"
self.profilerfile += '\\n'
if(operation == 'Encoder'):
value = action['value']
value = value.replace("\\n", "\\\\n")
self.profilerfile += " if '"+feature+"' in df.columns:"
self.profilerfile += '\\n'
self.profilerfile += " le_dict="+str(value)
self.profilerfile += '\\n'
self.profilerfile += " df['"+feature+"'] = df['"+feature+"'].apply(lambda x: le_dict.get(x,-1))"
self.profilerfile += '\\n'
self.profilerfile += " if -1 in df['"+feature+"'].values:"
self.profilerfile += '\\n'
self.profilerfile += " raise Exception('Category value of "+feature+" not present in training data')"
self.profilerfile += '\\n'
if 'conversion' in profiler:
catergoryConverton = profiler['conversion']
#print(catergoryConverton)
if (catergoryConverton['categoryEncoding'].lower() in ['targetencoding','onehotencoding']) and ('features' in catergoryConverton):
self.profilerfile += " encoder = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','categoryEncoder.pkl'))"
self.profilerfile += '\\n'
self.profilerfile += " CategoryFeatures = "+str(catergoryConverton['features'])
self.profilerfile += '\\n'
if catergoryConverton['categoryEncoding'].lower() == 'onehotencoding':
self.profilerfile += " transformed_data = encoder.transform(df[CategoryFeatures]).toarray()"
self.profilerfile += '\\n'
self.profilerfile += " feature_labels = encoder.get_feature_names(CategoryFeatures)"
self.profilerfile += '\\n'
self.profilerfile += " transformed_data = pd.DataFrame(transformed_data,columns=feature_labels) "
self.profilerfile += '\\n'
else:
self.profilerfile += " transformed_data = encoder.transform(df[CategoryFeatures])"
self.profilerfile += '\\n'
self.profilerfile += " dataColumns=list(df.columns)"
self.profilerfile += '\\n'
self.profilerfile += " nonNormFeatures=list(set(dataColumns) - set(CategoryFeatures))"
self.profilerfile += '\\n'
self.profilerfile += " dataArray=df[nonNormFeatures]"
self.profilerfile += '\\n'
self.profilerfile += " df = pd.concat([dataArray, transformed_data],axis=1)"
self.profilerfile += '\\n'
y = json.loads(numericToLabel_json)
for feature_details in y:
feature = feature_details['feature']
if feature not in features:
continue
label = feature_details['Labels']
bins = feature_details['Bins']
self.profilerfile += " if '"+feature+"' in df.columns:"
self.profilerfile += '\\n'
self.profilerfile += " cut_bins="+str(bins)
self.profilerfile += '\\n'
self.profilerfile += " cut_labels="+str(label)
self.profilerfile += '\\n'
self.profilerfile += " df['"+feature+"'] = pd.cut(df['"+feature+"'],bins=cut_bins,labels=cut_labels)"
self.profilerfile += '\\n'
self.profilerfile += " df['"+feature+"'] = df['"+feature+"'].fillna(value=0)"
self.profilerfile += '\\n'
if(len(text_features) > 0):
if(len(text_features) > 1):
self.profilerfile += ' merge_features = '+str(text_features)
self.profilerfile += '\\n'
self.profilerfile += ' df[\\'combined\\'] = df[merge_features].apply(lambda row: \\' \\'.join(row.values.astype(str)), axis=1)'
self.profilerfile += '\\n'
self.profilerfile += ' features = [\\'combined\\']'
self.profilerfile += '\\n'
else:
self.profilerfile += " features = "+str(text_features)
self.profilerfile += '\\n'
if model_type == 'BM25':
self.profilerfile += """\\
df_text = df[features[0]]
pipe = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','{preprocessing_pipe}'))
df['tokenize'] = pipe.transform(df_text)\\n""".format(preprocessing_pipe=preprocessing_pipe)
elif conversion_method == 'sentenceTransformer':
self.profilerfile += """\\
df_text = df[features[0]]
from sentence_transformers import SentenceTransformer
model = SentenceTransformer(\\'sentence-transformers/msmarco-distilroberta-base-v2\\')
df_vect = model.encode(df_text)
for empCol in {text_features}:
df = df.drop(columns=[empCol])
if isinstance(df_vect, np.ndarray):
df1 = pd.DataFrame(df_vect)
else:
df1 = pd.DataFrame(df_vect.toarray(),columns = pipe.named_steps[\\'vectorizer\\'].get_feature_names())
df1 = df1.add_suffix(\\'_vect\\')
df = pd.concat([df, df1],axis=1)\\n""".format(preprocessing_pipe=preprocessing_pipe, text_features=text_features)
else:
self.profilerfile += """\\
df_text = df[features[0]]
pipe = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','{preprocessing_pipe}'))
df_vect=pipe.transform(df_text)
for empCol in {text_features}:
df = df.drop(columns=[empCol])
if isinstance(df_vect, np.ndarray):
df1 = pd.DataFrame(df_vect)
else:
df1 = pd.DataFrame(df_vect.toarray(),columns = pipe.named_steps[\\'vectorizer\\'].get_feature_names())
df1 = df1.add_suffix(\\'_vect\\')
df = pd.concat([df, df1],axis=1)\\n""".format(preprocessing_pipe=preprocessing_pipe, text_features=text_features)
if(learner_type == 'Text Similarity'):
self.profilerfile += ' df[\\''+firstDocFeature+'\\'] = textCleaning(df[\\''+firstDocFeature+'\\'])'
self.profilerfile += '\\n'
self.profilerfile += ' df[\\''+secondDocFeature+'\\'] = textCleaning(df[\\''+secondDocFeature+'\\'])'
self.profilerfile += '\\n'
if len(normFeatures) > 0 and normalizer != '':
self.profilerfile += " normFeatures = "+str(normFeatures)
self.profilerfile += '\\n'
self.profilerfile += ' normalizepipe = joblib.load(os.path.join(os.path.dirname(os.path.abspath(__file__)),\\'..\\',\\'model\\',\\''+normalizer+'\\'))'
self.profilerfile += '\\n'
self.profilerfile += ' dataColumns=list(df.columns)'
self.profilerfile += '\\n'
self.profilerfile += ' nonNormFeatures=list(set(dataColumns) - set(normFeatures))'
self.profilerfile += '\\n'
self.profilerfile += ' dataframe=df[normFeatures]'
self.profilerfile += '\\n'
self.profilerfile += ' transDf = normalizepipe.transform(dataframe)'
self.profilerfile += '\\n'
self.profilerfile += ' nontransDF=df[nonNormFeatures].values'
self.profilerfile += '\\n'
self.profilerfile += ' dataColumns=normFeatures+nonNormFeatures'
self.profilerfile += '\\n'
self.profilerfile += ' scaledDf = pd.DataFrame(np.hstack((transDf, nontransDF)),columns=dataColumns)'
self.profilerfile += '\\n'
self.profilerfile += ' df=scaledDf'
self.profilerfile += '\\n'
else:
self.profilerfile += ' df=df.dropna()\\n'
self.profilerfile += ' return(df)'
filename = os.path.join(deploy_path,'script','inputprofiler.py')
self.log.info('-------> Profiler File Location :'+filename)
f = open(filename, "w",encoding="utf-8")
f.write(str(self.profilerfile))
f.close()
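# Note (descriptive, not part of the generated file): the inputprofiler.py written above
# replays at prediction time the same steps fitted during training -- category encoding,
# numeric binning, text merging/vectorisation and optional normalisation -- and its
# generated body ends by returning the transformed dataframe to the caller.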
def isEnglish(self, s):
try:
s.encode(encoding='utf-8').decode('ascii')
except UnicodeDecodeError:
return False
else:
return True
def create_selector_file(self,deploy_path,features,pcaModel_pickle_file,bpca_features,apca_features,textFeatures,nonNumericFeatures,numericalFeatures,profiler,targetFeature, model_type,model,config=None):
cs.create_selector_file(self,deploy_path,features,pcaModel_pickle_file,bpca_features,apca_features,textFeatures,nonNumericFeatures,numericalFeatures,profiler,targetFeature, model_type,model,config)
def create_init_function_for_regress
.crate_readme_file(deploy_path,saved_model,features,deployJson['method'])
from prediction_package.requirements import requirementfile
requirementfile(deploy_path,model,textFeatures,learner_type)
os.chdir(deploy_path)
textdata = False
if(learner_type == 'Text Similarity' or len(textFeatures) > 0):
textdata = True
self.create_util_folder(deploy_path,learner_type)
self.log.info('Status:- |... Model deployment completed')
def deployTSum(self,deploy_path,preTrainedModellocation):
def create_predict(preTrainedModellocation):
text = f"""
import sys
import json
def predict(data):
try:
import pandas as pd
import numpy as np
from pathlib import Path
keywordsFile =Path(__file__).parent/'data'/'keywordDataBase.csv'
outputSumFile =Path(__file__).parent/'data'/'summarizedOutput.csv'
fileName=data
#print("fileName---",fileName)
inputDataFileFrame = pd.DataFrame()
inputDataFileFrame['Sentences']=""
rowIndex=0
if fileName.endswith(".pdf"):
from pypdf import PdfReader
reader = PdfReader(fileName)
number_of_pages = len(reader.pages)
text=""
textOutputForFile=""
OrgTextOutputForFile=""
for i in range(number_of_pages) :
page = reader.pages[i]
text1 = page.extract_text()
text=text+text1
import nltk
tokens = nltk.sent_tokenize(text)
for sentence in tokens:
sentence=sentence.replace("\\\\n", " ")
if (len(sentence.split()) < 4 ) or (len(str(sentence.split(',')).split()) < 8)or (any(chr.isdigit() for chr in sentence)) :
continue
inputDataFileFrame.at[rowIndex,'Sentences']=str(sentence.strip())
rowIndex=rowIndex+1
if fileName.endswith(".txt"):
data=[]
with open(fileName, "r",encoding="utf-8") as f:
data.append(f.read())
str1 = ""
for ele in data:
str1 += ele
sentences=str1.split(".")
count=0
for sentence in sentences:
count += 1
inputDataFileFrame.at[rowIndex,'Sentences']=str(sentence.strip())
rowIndex=rowIndex+1
inputDataFileFrame['LabelByKw']=0
#print(inputDataFileFrame)
keywordsFileFrame=pd.read_csv(keywordsFile,encoding='utf-8')
Keyword_list = keywordsFileFrame['Keyword'].tolist()
for i in inputDataFileFrame.index:
for x in Keyword_list:
if (str(inputDataFileFrame["Sentences"][i])).lower().find(x) != -1:
inputDataFileFrame['LabelByKw'][i]=1
break
import pickle
from sklearn.preprocessing import LabelEncoder
pkl_filename='classificationModel.sav'
pkl_filename =Path(__file__).parent/'model'/'classificationModel.sav'
with open(pkl_filename, 'rb') as file:
pickle_model = pickle.load(file)
testsample=inputDataFileFrame[["Sentences"]]
labelencoder = LabelEncoder()
testsample["Sentences"] = labelencoder.fit_transform(testsample["Sentences"])
y_predicted = pickle_model.predict_proba(testsample)
df=pd.DataFrame({{"SectionName":np.nan,"Sentences":np.nan, "Predicted_Prob":y_predicted[:,1]}})
df['LabelByModel']=df['Predicted_Prob'].apply(lambda x: 0 if x <= 0.5 else 1 )
inputDataFileFrame['LabelByModel']= df['LabelByModel']
textToSum=""
for i in inputDataFileFrame.index:
if (inputDataFileFrame['LabelByModel'][i] or inputDataFileFrame['LabelByKw'][i]) :
textToSum=textToSum+" "+inputDataFileFrame["Sentences"][i]
stdir=r"{preTrainedModellocation}"
stdir = stdir.replace('\\\\\\\\', '\\\\\\\\\\\\\\\\')
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
modelbert = AutoModelForSeq2SeqLM.from_pretrained(stdir,local_files_only=True)
tokenizer = AutoTokenizer.from_pretrained(stdir,local_files_only=True)
inputs = tokenizer("summarize: " + textToSum, return_tensors="pt", max_length=512, truncation=True)
outputs = modelbert.generate(inputs["input_ids"], max_length=512, min_length=140, length_penalty=2.0, num_beams=4, early_stopping=True)
summarizedOutputOfSection= tokenizer.decode(outputs[0])
summarizedOutputOfSection=summarizedOutputOfSection.replace("</s> ","")
summarizedOutputOfSection=summarizedOutputOfSection.replace("<s> ","")
sumDatadata = [summarizedOutputOfSection]
df = pd.DataFrame(sumDatadata, columns=['textSum'])
df.to_csv(outputSumFile,encoding='utf-8')
outputjson = {{"status":"SUCCESS","msg":"Press Download button to download summarized output","data":summarizedOutputOfSection}}
print("predictions:",json.dumps(outputjson))
return (json.dumps(outputjson))
except KeyError as e:
output = {{"status":"FAIL","message":str(e).strip('"')}}
print("predictions:",json.dumps(output))
return (json.dumps(output))
except Exception as e:
output = {{"status":"FAIL","message":str(e).strip('"')}}
print("predictions:",json.dumps(output))
return (json.dumps(output))
if __name__ == "__main__":
output = predict(sys.argv[1])
"""
return text
deploy_path = Path(deploy_path)
aion_prediction = deploy_path/'aion_predict.py'
with open(aion_prediction, 'w') as f:
f.write(create_predict(preTrainedModellocation))
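# Usage sketch (assumption, not emitted by create_predict): the aion_predict.py written
# above expects a single command-line argument pointing at a .pdf or .txt document and
# prints a JSON payload prefixed with "predictions:", e.g.
#
#   python aion_predict.py /path/to/report.pdf
#
# The summarised section text is also written to data/summarizedOutput.csv next to the
# script, as the generated code above shows.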
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from pathlib import Path
from AION.prediction_package.imports import importModule
from AION.prediction_package.aion_prediction import aionPrediction
from AION.prediction_package.utility import TAB_CHAR
from AION.prediction_package import utility
from AION.prediction_package import common
from AION.prediction_package.base import deployer
def is_supported(problem_type, algo=None):
"""
Return True if the given problem_type is supported, otherwise False
"""
supported = ['classification','regression','clustering','timeseriesforecasting','Text Similarity']
return problem_type in supported
def get_deployer(problem_type, algo=None, params={}):
"""
Return a deployer object for the given problem type.
Raise an error if no deployer class is associated with the problem type.
"""
params['problem_type'] = problem_type
if problem_type == 'classification':
return classification( params)
elif problem_type == 'regression':
return regression( params)
elif problem_type == 'clustering':
return clustering( params)
elif problem_type == 'timeseriesforecasting':
from AION.prediction_package.time_series import forecasting
return forecasting.get_deployer( params)
elif problem_type == 'Text Similarity':
return textSimilarity( params)
else:
raise ValueError('deployment is not supported')
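# Usage sketch (illustrative only; the params keys below are inferred from how the deployer
# subclasses in this module read self.params, and the algo/model file names are assumptions):
#
#   if is_supported('classification'):
#       dep = get_deployer('classification', params={
#           'features': {'input_feat': ['v2'], 'target_feat': 'v1', 'text_feat': []},
#           'training': {'algo': 'XGBoost', 'model_file': 'model.pkl'},
#       })
#       trainer_src = dep.training_code()
#       formatter_src = dep.formatter_code()
#
# The concrete deployer classes below generate the trainer and output_format code blocks
# that end up in the deployed prediction package.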
class classification( deployer):
def __init__(self, params={}):
super().__init__( params)
self.feature_reducer = False
if not self.name:
self.name = 'classification'
def create_idrift(self):
obj = aionPrediction()
if self.params['features']['text_feat']:
obj.create_text_drift_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat'],self.name)
else:
obj.create_drift_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat'],self.name)
def create_odrift(self):
obj = aionPrediction()
if self.params['features']['text_feat']:
obj.create_classification_text_performance_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat'])
else:
obj.create_classification_performance_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat'])
def training_code( self):
self.importer.addModule(module='pandas',mod_as='pd')
code = f"""
class trainer():
"""
init_code, run_code = self._get_train_code()
return code + init_code + run_code
def _get_train_code(self):
init_code = f"""
def __init__( self):
model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}"
if not model_file.exists():
raise ValueError(f'Trained model file not found: {{model_file}}')"""
run_code = f"""
def run(self, df):\\
"""
if self.params['training']['algo'] in ['Neural Network']:
self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models')
init_code += f"""
self.model = load_model(model_file)
"""
run_code += """
df = df.astype(np.float32)
return pd.DataFrame(np.argmax(self.model.predict(df),axis=1))
"""
elif self.params['training']['algo'] in ['Neural Architecture Search']:
self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models')
self.importer.addModule(module='autokeras',mod_as='ak')
init_code += f"""
self.model = load_model(model_file,custom_objects=ak.CUSTOM_OBJECTS)
"""
run_code += """
df = df.astype(np.float32)
return pd.DataFrame(self.model.predict(df))
"""
elif self.params['training']['algo'] in ['Deep Q Network','Dueling Deep Q Network']:
self.importer.addModule('joblib')
self.importer.addModule(module='numpy',mod_as='np')
self.importer.addModule(module='constant',mod_from='tensorflow')
self.importer.addModule(module='time_step',mod_from='tf_agents.trajectories')
init_code += f"""
self.model = joblib.load(model_file)
"""
run_code += """
df = df.astype(np.float32)
q, _ = self.model(np.array(df), step_type=constant([time_step.StepType.FIRST] * np.array(df).shape[0]), training=False)
return pd.DataFrame(q.numpy())
"""
elif self.params['training']['algo'] in ['Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)']:
self.importer.addModule(module='numpy',mod_as='np')
self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models')
init_code += f"""
self.model = load_model(model_file)
"""
run_code += """
df = np.expand_dims(df, axis=2)
df = df.astype(np.float32)
return pd.DataFrame(np.argmax(self.model.predict(df),axis=1))
"""
else:
self.importer.addModule(module='joblib')
self.importer.addModule(module='numpy',mod_as='np')
init_code += f"""
self.model = joblib.load(model_file)
"""
run_code += """
df = df.astype(np.float32)
return pd.DataFrame(self.model.predict_proba(df), columns=self.model.classes_)
"""
return init_code, run_code
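# For reference, the default (joblib) branch above assembles generated trainer code of
# roughly this shape ("<model_file>" stands for the formatted model file name; this is a
# sketch of the emitted text, not additional source):
#
#   class trainer():
#       def __init__( self):
#           model_file = (Path(__file__).parent/"model")/"<model_file>"
#           if not model_file.exists():
#               raise ValueError(f'Trained model file not found: {model_file}')
#           self.model = joblib.load(model_file)
#       def run(self, df):
#           df = df.astype(np.float32)
#           return pd.DataFrame(self.model.predict_proba(df), columns=self.model.classes_)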
def formatter_code(self):
self.importer.addModule('json')
self.importer.addModule('joblib')
self.importer.addModule('pandas', mod_as='pd')
return """
class output_format():
def __init__(self):
pass
def run(self, raw_df, output):
output = round(output,2)
encoder_file = (Path(__file__).parent/"model")/"label_encoder.pkl"
if encoder_file.exists():
encoder = joblib.load(encoder_file)
output.rename(columns=dict(zip(output.columns, encoder.inverse_transform(list(output.columns)))), inplace=True)
raw_df['prediction'] = output.idxmax(axis=1)
raw_df['probability'] = output.max(axis=1).round(2)
raw_df['remarks'] = output.apply(lambda x: x.to_json(double_precision=2), axis=1)
outputjson = raw_df.to_json(orient='records',double_precision=5)
outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}
return(json.dumps(outputjson))
"""
class regression( deployer):
def __init__(self, params={}):
super().__init__( params)
self.feature_reducer = False
if not self.name:
self.name = 'regression'
def create_idrift(self):
obj = aionPrediction()
if self.params['features']['text_feat']:
obj.create_text_drift_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat'],self.name)
else:
obj.create_drift_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat'],self.name)
def create_odrift(self):
obj = aionPrediction()
if self.params['features']['text_feat']:
obj.create_regression_text_performance_file(self.deploy_path,self.params['features']['text_feat'],self.params['features']['target_feat'])
else:
obj.create_regression_performance_file(self.deploy_path,self.params['features']['input_feat'],self.params['features']['target_feat'])
def training_code( self):
self.importer.addModule(module='pandas',mod_as='pd')
code = f"""
class trainer():
"""
init_code = f"""
def __init__( self):
model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}"
if not model_file.exists():
raise ValueError(f'Trained model file not found: {{model_file}}')
"""
run_code = f"""
def run(self, df):\\
"""
if self.params['training']['algo'] in ['Neural Architecture Search']:
self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models')
self.importer.addModule(module='autokeras',mod_as='ak')
init_code += f"""
self.model = load_model(model_file,custom_objects=ak.CUSTOM_OBJECTS)
"""
run_code += """
df = df.astype(np.float32)
return self.model.predict(df).reshape(1, -1)
"""
elif self.params['training']['algo'] in ['Neural Network','Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)']:
self.importer.addModule(module='numpy',mod_as='np')
self.importer.addModule(module='load_model',mod_from='tensorflow.keras.models')
init_code += f"""
self.model = load_model(model_file)
"""
run_code += """
df = np.expand_dims(df, axis=2)
df = df.astype(np.float32)
return self.model.predict(df).reshape(1, -1)
"""
else:
self.importer.addModule('joblib')
init_code += f"""
self.model = joblib.load(model_file)
"""
run_code += """
df = df.astype(np.float32)
return self.model.predict(df).reshape(1, -1)
"""
return code + init_code + run_code
def formatter_code(self):
self.importer.addModule('json')
self.importer.addModule('pandas', mod_as='pd')
return """
class output_format():
def __init__(self):
pass
def run(self, raw_df, output):
raw_df['prediction'] = output[0]
raw_df['prediction'] = raw_df['prediction'].round(2)
outputjson = raw_df.to_json(orient='records',double_precision=5)
outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}
return(json.dumps(outputjson))
"""
class clustering( deployer):
def __init__(self, params={}):
super().__init__( params)
self.feature_reducer = False
if not self.name:
self.name = 'clustering'
def training_code( self):
self.importer.addModule('joblib')
self.importer.addModule(module='pandas',mod_as='pd')
code = f"""
class trainer():
"""
init_code = f"""
def __init__( self):
model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}"
if not model_file.exists():
raise ValueError(f'Trained model file not found: {{model_file}}')
"""
run_code = f"""
def run(self, df):\\
"""
if self.params['training']['algo'] == 'DBSCAN':
init_code += f"""
self.model = joblib.load(model_file)
"""
run_code += """
return self.model.fit_predict(df)
"""
else:
init_code += f"""
self.model = joblib.load(model_file)
"""
run_code += """
return self.model.predict(df).reshape(1, -1)
"""
return code + init_code + run_code
def formatter_code(self):
self.importer.addModule('json')
self.importer.addModule('pandas', mod_as='pd')
return """
class output_format():
def __init__(self):
pass
def run(self, raw_df, output):
raw_df['prediction'] = output[0]
raw_df['prediction'] = raw_df['prediction'].round(2)
outputjson = raw_df.to_json(orient='records',double_precision=2)
outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}
return(json.dumps(outputjson))
"""
return code
if __name__ == '__main__':
config = {'usecase_name': 'AI0110', 'usecase_ver': '1', 'features': {'input_feat': ['v2'], 'target_feat': 'v1', 'text_feat': ['v2']}, 'paths': {'deploy': r'C:/Users/vashistah/AppData/Local/Programs/HCLTech/AION/data/target/AI0110/1', 'usecase': r'C:/Users/vashistah/AppData/Local/Programs/HCLTech/AION/data/target/AI0110'}, 'profiler': {'input_features': ['v2'], 'output_features': ['07xxxxxxxxx_vect', '08700621170150p_vect', '08702840625comuk_vect', '08718726270150gbpmtmsg18_vect', '1000s_vect', '10am7pm_vect', '10k_vect', '10p_vect', '10pmin_vect', '10ppm_vect', '11mths_vect', '125gift_vect', '12hrs_vect', '12mths_vect', '150p_vect', '150perwksub_vect', '150pm_vect', '150pmin_vect', '150pmsg_vect', '150pmsgrcvdhgsuite3422landsroww1j6hl_vect', '150pmtmsgrcvd18_vect', '150ppm_vect', '150ptone_vect', '150pwk_vect', '150week_vect', '16only_vect', '18only_vect', '1hr_vect', '1minmobsmorelkpobox177hp51fl_vect', '1st_vect', '1x150pwk_vect', '20p_vect', '20pmin_vect', '21st_vect', '220cm2_vect', '24hrs_vect', '25p_vect', '26th_vect', '2day_vect', '2find_vect', '2geva_vect', '2go_vect', '2marrow_vect', '2mrw_vect', '2nd_vect', '2nite_vect', '2optout_vect', '2p_vect', '2u_vect', '2waxsto_vect', '2wks_vect', '300p_vect', '31pmsg_vect', '3510i_vect', '3d_vect', '3g_vect', '3gbp_vect', '3hrs_vect', '3mins_vect', '3qxj9_vect', '3rd_vect', '3ss_vect', '3u_vect', '3uz_vect', '3wk_vect', '40gb_vect', '4a_vect', '4d_vect', '4eva_vect', '4get_vect', '4info_vect', '4mths_vect', '4th_vect', '4u_vect', '50p_vect', '5min_vect', '5pm_vect', '5wb_vect', '5we_vect', '60pmin_vect', '6hrs_vect', '6months_vect', '6pm_vect', '7250i_vect', '7ish_vect', '8am_vect', '8pm_vect', '8th_vect', '8wp_vect', '9ae_vect', '9ja_vect', '9pm_vect', '9t_vect', 'aathi_vect', 'abi_vect', 'ability_vect', 'abiola_vect', 'able_vect', 'abt_vect', 'abta_vect', 'aburo_vect', 'ac_vect', 'academic_vect', 'acc_vect', 'accept_vect', 'access_vect', 'accident_vect', 'accidentally_vect', 'accordingly_vect', 'account_vect', 'ache_vect', 'across_vect', 'acted_vect', 'action_vect', 'activate_vect', 'activities_vect', 'actor_vect', 'actual_vect', 'actually_vect', 'ad_vect', 'adam_vect', 'add_vect', 'added_vect', 'addicted_vect', 'addie_vect', 'address_vect', 'admin_vect', 'administrator_vect', 'admirer_vect', 'admit_vect', 'adore_vect', 'adoring_vect', 'ads_vect', 'adult_vect', 'advance_vect', 'adventure_vect', 'advice_vect', 'advise_vect', 'affair_vect', 'affairs_vect', 'affectionate_vect', 'afraid_vect', 'aft_vect', 'afternoon_vect', 'aftr_vect', 'agalla_vect', 'age_vect', 'age16_vect', 'ages_vect', 'ago_vect', 'agree_vect', 'ah_vect', 'aha_vect', 'ahead_vect', 'ahmad_vect', 'ai_vect', 'aight_vect', 'aint_vect', 'air_vect', 'airport_vect', 'airtel_vect', 'aiya_vect', 'aiyah_vect', 'aiyar_vect', 'aiyo_vect', 'al_vect', 'album_vect', 'alert_vect', 'alex_vect', 'alfie_vect', 'ali_vect', 'allah_vect', 'allow_vect', 'allowed_vect', 'almost_vect', 'alone_vect', 'along_vect', 'already_vect', 'alright_vect', 'alrite_vect', 'also_vect', 'always_vect', 'alwys_vect', 'amazing_vect', 'american_vect', 'among_vect', 'amount_vect', 'amp_vect', 'amt_vect', 'andros_vect', 'angry_vect', 'annie_vect', 'anniversary_vect', 'announcement_vect', 'anot_vect', 'another_vect', 'ans_vect', 'ansr_vect', 'answer_vect', 'answered_vect', 'answering_vect', 'answers_vect', 'anthony_vect', 'anti_vect', 'anybody_vect', 'anymore_vect', 'anyone_vect', 'anything_vect', 'anytime_vect', 'anyway_vect', 'anyways_vect', 'apartment_vect', 'app_vect', 'apparently_vect', 'applebees_vect', 
'apply_vect', 'appointment_vect', 'appreciate_vect', 'appreciated_vect', 'approx_vect', 'apps_vect', 'appt_vect', 'april_vect', 'ar_vect', 'arcade_vect', 'ard_vect', 'area_vect', 'argh_vect', 'argument_vect', 'arm_vect', 'armand_vect', 'arms_vect', 'around_vect', 'arrange_vect', 'arrested_vect', 'arrive_vect', 'arsenal_vect', 'art_vect', 'arun_vect', 'asap_vect', 'ashley_vect', 'ask_vect', 'askd_vect', 'asked_vect', 'askin_vect', 'asking_vect', 'asks_vect', 'asleep_vect', 'ass_vect', 'assume_vect', 'ate_vect', 'atlanta_vect', 'atlast_vect', 'atm_vect', 'attached_vect', 'attempt_vect', 'attend_vect', 'auction_vect', 'august_vect', 'aunt_vect', 'aunty_vect', 'auto_vect', 'av_vect', 'available_vect', 'avatar_vect', 'ave_vect', 'avent_vect', 'avoid_vect', 'await_vect', 'awaiting_vect', 'awake_vect', 'award_vect', 'awarded_vect', 'away_vect', 'awesome_vect', 'aww_vect', 'b4_vect', 'ba_vect', 'babe_vect', 'babes_vect', 'babies_vect', 'baby_vect', 'back_vect', 'bad_vect', 'bag_vect', 'bags_vect', 'bahamas_vect', 'bak_vect', 'balance_vect', 'bank_vect', 'banks_vect', 'bar_vect', 'barely_vect', 'basic_vect', 'basically_vect', 'bat_vect', 'bath_vect', 'bathe_vect', 'bathing_vect', 'battery_vect', 'bay_vect', 'bb_vect', 'bc_vect', 'bck_vect', 'bcoz_vect', 'bday_vect', 'be_vect', 'bears_vect', 'beautiful_vect', 'beauty_vect', 'bec_vect', 'become_vect', 'becoz_vect', 'bed_vect', 'bedrm_vect', 'bedroom_vect', 'beer_vect', 'befor_vect', 'beg_vect', 'begin_vect', 'behave_vect', 'behind_vect', 'bein_vect', 'believe_vect', 'bell_vect', 'belly_vect', 'belovd_vect', 'best_vect', 'bet_vect', 'better_vect', 'beyond_vect', 'bf_vect', 'bid_vect', 'bids_vect', 'big_vect', 'bigger_vect', 'biggest_vect', 'bill_vect', 'billed_vect', 'billion_vect', 'bills_vect', 'bin_vect', 'biola_vect', 'birds_ve |
ct', 'birla_vect', 'birth_vect', 'birthdate_vect', 'birthday_vect', 'bishan_vect', 'bit_vect', 'bitch_vect', 'bite_vect', 'black_vect', 'blackberry_vect', 'blah_vect', 'blake_vect', 'blank_vect', 'bleh_vect', 'bless_vect', 'blessing_vect', 'bloo_vect', 'blood_vect', 'bloody_vect', 'blue_vect', 'bluetooth_vect', 'bluff_vect', 'boat_vect', 'body_vect', 'bold_vect', 'bone_vect', 'bonus_vect', 'boo_vect', 'book_vect', 'booked_vect', 'booking_vect', 'books_vect', 'boost_vect', 'booty_vect', 'bored_vect', 'boring_vect', 'born_vect', 'boss_vect', 'boston_vect', 'bother_vect', 'bottom_vect', 'bought_vect', 'bout_vect', 'bowl_vect', 'box_vect', 'box326_vect', 'box334sk38ch_vect', 'box97n7qp_vect', 'boy_vect', 'boye_vect', 'boyfriend_vect', 'boys_vect', 'boytoy_vect', 'brah_vect', 'brand_vect', 'bread_vect', 'break_vect', 'breathe_vect', 'bright_vect', 'brilliant_vect', 'bring_vect', 'bringing_vect', 'brings_vect', 'british_vect', 'bro_vect', 'broad_vect', 'broke_vect', 'broken_vect', 'bros_vect', 'brothas_vect', 'brother_vect', 'brought_vect', 'bruv_vect', 'bslvyl_vect', 'bt_vect', 'btnationalrate_vect', 'btw_vect', 'bucks_vect', 'bud_vect', 'budget_vect', 'buff_vect', 'buffet_vect', 'bugis_vect', 'building_vect', 'buns_vect', 'burger_vect', 'burns_vect', 'bus_vect', 'buses_vect', 'business_vect', 'busy_vect', 'butt_vect', 'buy_vect', 'buying_vect', 'buzz_vect', 'bx420_vect', 'bx420ip45we_vect', 'bye_vect', 'ca_vect', 'cabin_vect', 'cafe_vect', 'cake_vect', 'cal_vect', 'calculation_vect', 'calicut_vect', 'california_vect', 'call_vect', 'call2optout674_vect', 'callback_vect', 'callcost_vect', 'called_vect', 'caller_vect', 'callers_vect', 'callertune_vect', 'callin_vect', 'calling_vect', 'calls_vect', 'callså_vect', 'cam_vect', 'camcorder_vect', 'came_vect', 'camera_vect', 'cameravideo_vect', 'campus_vect', 'can_vect', 'canada_vect', 'canal_vect', 'canary_vect', 'cancel_vect', 'cancelled_vect', 'cancer_vect', 'cant_vect', 'captain_vect', 'car_vect', 'card_vect', 'cardiff_vect', 'care_vect', 'cared_vect', 'career_vect', 'careful_vect', 'carefully_vect', 'caring_vect', 'carlos_vect', 'caroline_vect', 'cars_vect', 'cartoon_vect', 'case_vect', 'cash_vect', 'cashbalance_vect', 'cashin_vect', 'castor_vect', 'cat_vect', 'catch_vect', 'catching_vect', 'caught_vect', 'cause_vect', 'cbe_vect', 'cc_vect', 'cd_vect', 'cdgt_vect', 'cds_vect', 'celebrate_vect', 'celebration_vect', 'cell_vect', 'center_vect', 'centre_vect', 'certainly_vect', 'cha_vect', 'chain_vect', 'challenge_vect', 'chance_vect', 'change_vect', 'changed_vect', 'changes_vect', 'channel_vect', 'character_vect', 'charge_vect', 'charged_vect', 'charges_vect', 'charity_vect', 'charles_vect', 'chase_vect', 'chasing_vect', 'chat_vect', 'chatting_vect', 'cheap_vect', 'cheaper_vect', 'cheat_vect', 'chechi_vect', 'check_vect', 'checked_vect', 'checking_vect', 'cheers_vect', 'chennai_vect', 'cherish_vect', 'chest_vect', 'chicken_vect', 'chikku_vect', 'child_vect', 'childish_vect', 'children_vect', 'chill_vect', 'chillin_vect', 'china_vect', 'chinese_vect', 'chip_vect', 'chocolate_vect', 'choice_vect', 'choose_vect', 'chosen_vect', 'christ_vect', 'christmas_vect', 'church_vect', 'cine_vect', 'cinema_vect', 'citizen_vect', 'city_vect', 'claim_vect', 'claims_vect', 'claire_vect', 'class_vect', 'classes_vect', 'clean_vect', 'cleaning_vect', 'clear_vect', 'clearly_vect', 'click_vect', 'clock_vect', 'close_vect', 'closed_vect', 'closer_vect', 'closes_vect', 'clothes_vect', 'club_vect', 'cn_vect', 'co_vect', 'coast_vect', 'coat_vect', 'cochin_vect', 'code_vect', 
'coffee_vect', 'coin_vect', 'coins_vect', 'cold_vect', 'colleagues_vect', 'collect_vect', 'collected_vect', 'collecting_vect', 'collection_vect', 'college_vect', 'colour_vect', 'come_vect', 'comedy_vect', 'comes_vect', 'comin_vect', 'coming_vect', 'commercial_vect', 'common_vect', 'community_vect', 'comp_vect', 'company_vect', 'competition_vect', 'complete_vect', 'completed_vect', 'completely_vect', 'complimentary_vect', 'computer_vect', 'concentrate_vect', 'concert_vect', 'conditions_vect', 'conducts_vect', 'confidence_vect', 'confirm_vect', 'congrats_vect', 'congratulations_vect', 'connection_vect', 'consider_vect', 'considering_vect', 'constant_vect', 'constantly_vect', 'contact_vect', 'contacted_vect', 'contacts_vect', 'content_vect', 'contents_vect', 'continue_vect', 'contract_vect', 'control_vect', 'convey_vect', 'convinced_vect', 'cool_vect', 'coping_vect', 'copy_vect', 'cornwall_vect', 'correct_vect', 'cos_vect', 'cost_vect', 'costa_vect', 'costs_vect', 'costå_vect', 'could_vect', 'count_vect', 'countin_vect', 'country_vect', 'couple_vect', 'course_vect', 'cover_vect', 'coz_vect', 'cr9_vect', 'cramps_vect', 'crave_vect', 'crazy_vect', 'created_vect', 'credit_vect', 'credited_vect', 'credits_vect', 'creepy_vect', 'crisis_vect', 'crore_vect', 'cross_vect', 'croydon_vect', 'cruise_vect', 'cry_vect', 'cs_vect', 'csbcm4235wc1n3xx_vect', 'csstop_vect', 'cud_vect', 'cuddling_vect', 'cum_vect', 'cup_vect', 'curious_vect', 'current_vect', 'currently_vect', 'cust_vect', 'custcare_vect', 'custcare08718720201_vect', 'custom_vect', 'customer_vect', 'customers_vect', 'cut_vect', 'cute_vect', 'cutting_vect', 'cuz_vect', 'cw25wx_vect', 'da_vect', 'dad_vect', 'daddy_vect', 'daily_vect', 'damn_vect', 'dance_vect', 'dancing_vect', 'dare_vect', 'dark_vect', 'darlin_vect', 'darling_vect', 'darren_vect', 'dat_vect', 'date_vect', 'dates_vect', 'dating_vect', 'dave_vect', 'day_vect', 'days_vect', 'de_vect', 'dead_vect', 'deal_vect', 'dealer_vect', 'dealing_vect', 'dear_vect', 'dearly_vect', 'death_vect', 'decide_vect', 'decided_vect', 'decimal_vect', 'decision_vect', 'deep_vect', 'def_vect', 'definite_vect', 'definitely_vect', 'del_vect', 'delete_vect', 'deleted_vect', 'delhi_vect', 'deliver_vect', 'delivered_vect', 'deliveredtomorrow_vect', 'delivery_vect', 'dem_vect', 'demand_vect', 'den_vect', 'denis_vect', 'department_vect', 'depends_vect', 'depressed_vect', 'derek_vect', 'desires_vect', 'desperate_vect', 'details_vect', 'dey_vect', 'dhoni_vect', 'dial_vect', 'dick_vect', 'dictionary_vect', 'didn_vect', 'didnt_vect', 'didt_vect', 'die_vect', 'died_vect', 'diet_vect', 'different_vect', 'difficult_vect', 'digital_vect', 'dignity_vect', 'din_vect', 'dinner_vect', 'dint_vect', 'direct_vect', 'directly_vect', 'dirty_vect', 'dis_vect', 'discount_vect', 'discuss_vect', 'dislikes_vect', 'display_vect', 'distance_vect', 'distract_vect', 'disturb_vect', 'division_vect', 'dload_vect', 'dnt_vect', 'doc_vect', 'docs_vect', 'doctor_vect', 'doesnt_vect', 'dog_vect', 'dogging_vect', 'doggy_vect', 'doin_vect', 'dollars_vect', 'don_vect', 'done_vect', 'dont_vect', 'donåõt_vect', 'door_vect', 'dorm_vect', 'double_vect', 'dough_vect', 'download_vect', 'downloads_vect', 'draw_vect', 'dream_vect', 'dreams_vect', 'dress_vect', 'dressed_vect', 'dresser_vect', 'drink_vect', 'drinking_vect', 'drinks_vect', 'drive_vect', 'driver_vect', 'drivin_vect', 'driving_vect', 'drop_vect', 'dropped_vect', 'drug_vect', 'drugs_vect', 'drunk_vect', 'dry_vect', 'ds_vect', 'dubsack_vect', 'dude_vect', 'due_vect', 'dun_vect', 'dunno_vect', 
'durban_vect', 'dvd_vect', 'earlier_vect', 'early_vect', 'earth_vect', 'easier_vect', 'easily_vect', 'east_vect', 'easter_vect', 'easy_vect', 'eat_vect', 'eaten_vect', 'eatin_vect', 'eating_vect', 'ebay_vect', 'ec2a_vect', 'ee_vect', 'eek_vect', 'eerie_vect', 'effects_vect', 'eg_vect', 'egg_vect', 'eggs_vect', 'eh_vect', 'eight_vect', 'either_vect', 'ela_vect', 'electricity_vect', 'else_vect', 'elsewhere_vect', 'em_vect', 'email_vect', 'embarassed_vect', 'empty_vect', 'end_vect', 'ended_vect', 'ending_vect', 'ends_vect', 'enemy_vect', 'energy_vect', 'eng_vect', 'engin_vect', 'england_vect', 'english_vect', 'enjoy_vect', 'enjoyed_vect', 'enough_vect', 'enter_vect', 'entered_vect', 'entitled_vect', 'entry_vect', 'enuff_vect', 'envelope_vect', 'er_vect', 'erm_vect', 'escape_vect', 'especially_vect', 'esplanade_vect', 'eta_vect', 'etc_vect', 'euro_vect', 'euro2004_vect', 'europe_vect', 'eve_vect', 'eveb_vect', 'even_vect', 'evening_vect', 'event_vect', 'ever_vect', 'every_vect', 'everybody_vect', 'everyday_vect', 'everyone_vect', 'everything_vect', 'everywhere_vect', 'evn_vect', 'evng_vect', 'ex_vect', 'exact_vect', 'exactly_vect', 'exam_vect', 'exams_vect', 'excellent_vect', 'except_vect', 'exciting_vect', 'excuse_vect', 'excuses_vect', 'executive_vect', 'exeter_vect', 'exhausted_vect', 'expect_vect', 'expecting_vect', 'expensive_vect', 'experience_vect', 'expired_vect', 'expires_vect', 'explain_vect', 'explicit_vect', 'explosive_vect', 'express_vect', 'extra_vect', 'eye_vect', 'eyes_vect', 'fa_vect', 'fab_vect', 'face_vect', 'facebook_vect', 'fact_vect', 'faggy_vect', 'failed_vect', 'fair_vect', 'faith_vect', 'fall_vect', 'falls_vect', 'family_vect', 'fan_vect', ' |
fancies_vect', 'fancy_vect', 'fantasies_vect', 'fantastic_vect', 'fantasy_vect', 'far_vect', 'farm_vect', 'fast_vect', 'faster_vect', 'fat_vect', 'father_vect', 'fathima_vect', 'fault_vect', 'fave_vect', 'favorite_vect', 'favour_vect', 'favourite_vect', 'fb_vect', 'feb_vect', 'february_vect', 'feel_vect', 'feelin_vect', 'feeling_vect', 'feels_vect', 'fees_vect', 'feet_vect', 'fell_vect', 'felt_vect', 'fetch_vect', 'fever_vect', 'field_vect', 'fifteen_vect', 'fight_vect', 'fighting_vect', 'figure_vect', 'file_vect', 'files_vect', 'fill_vect', 'filling_vect', 'fills_vect', 'film_vect', 'final_vect', 'finally_vect', 'find_vect', 'fine_vect', 'fingers_vect', 'finish_vect', 'finished_vect', 'first_vect', 'fit_vect', 'fix_vect', 'fixed_vect', 'flag_vect', 'flaked_vect', 'flash_vect', 'flat_vect', 'flight_vect', 'flights_vect', 'flirt_vect', 'floor_vect', 'flower_vect', 'fml_vect', 'fo_vect', 'follow_vect', 'followed_vect', 'following_vect', 'fone_vect', 'food_vect', 'fool_vect', 'football_vect', 'force_vect', 'foreign_vect', 'forever_vect', 'forevr_vect', 'forget_vect', 'forgets_vect', 'forgiven_vect', 'forgot_vect', 'format_vect', 'forums_vect', 'forward_vect', 'forwarded_vect', 'found_vect', 'four_vect', 'fr_vect', 'fran_vect', 'freak_vect', 'free_vect', 'freefone_vect', 'freemsg_vect', 'freephone_vect', 'freezing_vect', 'fren_vect', 'frens_vect', 'fret_vect', 'fri_vect', 'friday_vect', 'friend_vect', 'friends_vect', 'friendship_vect', 'fringe_vect', 'frm_vect', 'frnd_vect', 'frnds_vect', 'frndship_vect', 'fuck_vect', 'fuckin_vect', 'fucking_vect', 'ful_vect', 'full_vect', 'fullonsmscom_vect', 'fun_vect', 'funny_vect', 'future_vect', 'fyi_vect', 'gal_vect', 'gals_vect', 'game_vect', 'games_vect', 'gang_vect', 'gap_vect', 'gaps_vect', 'garage_vect', 'garbage_vect', 'gary_vect', 'gas_vect', 'gautham_vect', 'gave_vect', 'gay_vect', 'gd_vect', 'ge_vect', 'gee_vect', 'geeee_vect', 'geeeee_vect', 'gender_vect', 'generally_vect', 'genius_vect', 'gentle_vect', 'gentleman_vect', 'gently_vect', 'germany_vect', 'get_vect', 'gets_vect', 'gettin_vect', 'getting_vect', 'gf_vect', 'gibbs_vect', 'gift_vect', 'gim_vect', 'girl_vect', 'girls_vect', 'gist_vect', 'giv_vect', 'give_vect', 'given_vect', 'gives_vect', 'giving_vect', 'glad_vect', 'gn_vect', 'go_vect', 'goal_vect', 'god_vect', 'goes_vect', 'goin_vect', 'going_vect', 'gold_vect', 'gon_vect', 'gona_vect', 'gone_vect', 'good_vect', 'goodmorning_vect', 'goodnight_vect', 'goodnite_vect', 'google_vect', 'gorgeous_vect', 'gossip_vect', 'got_vect', 'goto_vect', 'gotten_vect', 'govtinstituitions_vect', 'gr8_vect', 'grace_vect', 'gram_vect', 'grand_vect', 'granite_vect', 'gravity_vect', 'great_vect', 'green_vect', 'greet_vect', 'greetings_vect', 'grins_vect', 'grl_vect', 'ground_vect', 'group_vect', 'gt_vect', 'guaranteed_vect', 'gud_vect', 'gudnite_vect', 'guess_vect', 'guessing_vect', 'guide_vect', 'guilty_vect', 'guy_vect', 'guys_vect', 'gym_vect', 'ha_vect', 'haf_vect', 'haha_vect', 'hai_vect', 'hair_vect', 'haiz_vect', 'half_vect', 'halloween_vect', 'ham_vect', 'hand_vect', 'handed_vect', 'handle_vect', 'hands_vect', 'handset_vect', 'hanging_vect', 'happen_vect', 'happened_vect', 'happening_vect', 'happens_vect', 'happiness_vect', 'happy_vect', 'hard_vect', 'hardcore_vect', 'hardly_vect', 'harry_vect', 'hate_vect', 'hav_vect', 'havent_vect', 'havenåõt_vect', 'havin_vect', 'head_vect', 'headache_vect', 'headin_vect', 'heads_vect', 'hear_vect', 'heard_vect', 'heart_vect', 'heavy_vect', 'hee_vect', 'height_vect', 'held_vect', 'helen_vect', 'hell_vect', 
'hella_vect', 'hello_vect', 'help_vect', 'help08712400602450p_vect', 'helpline_vect', 'hence_vect', 'henry_vect', 'heri_vect', 'herlove_vect', 'hes_vect', 'hex_vect', 'hey_vect', 'hgsuite3422lands_vect', 'hi_vect', 'hide_vect', 'high_vect', 'hill_vect', 'hint_vect', 'hip_vect', 'history_vect', 'hit_vect', 'hiya_vect', 'hl_vect', 'hlp_vect', 'hmm_vect', 'hmmm_vect', 'hmv_vect', 'ho_vect', 'hockey_vect', 'hol_vect', 'hold_vect', 'holder_vect', 'holding_vect', 'holiday_vect', 'holla_vect', 'hols_vect', 'holy_vect', 'home_vect', 'honey_vect', 'hook_vect', 'hop_vect', 'hope_vect', 'hoped_vect', 'hopefully_vect', 'hoping_vect', 'horny_vect', 'horo_vect', 'horrible_vect', 'hospital_vect', 'hospitals_vect', 'hostel_vect', 'hot_vect', 'hotel_vect', 'hotels_vect', 'hour_vect', 'hours_vect', 'house_vect', 'housemaid_vect', 'however_vect', 'hows_vect', 'howz_vect', 'hr_vect', 'hrishi_vect', 'hrs_vect', 'http_vect', 'hubby_vect', 'hug_vect', 'huh_vect', 'hun_vect', 'hungry_vect', 'hunny_vect', 'hurry_vect', 'hurt_vect', 'hurting_vect', 'hurts_vect', 'husband_vect', 'hv_vect', 'hw_vect', 'hyde_vect', 'iam_vect', 'ibhltd_vect', 'ibiza_vect', 'ic_vect', 'ice_vect', 'id_vect', 'idea_vect', 'ideal_vect', 'ideas_vect', 'identifier_vect', 'idiot_vect', 'idk_vect', 'ignore_vect', 'ikea_vect', 'il_vect', 'ill_vect', 'im_vect', 'imagine_vect', 'imma_vect', 'immediately_vect', 'imp_vect', 'important_vect', 'impossible_vect', 'improved_vect', 'in2_vect', 'inc_vect', 'inches_vect', 'incident_vect', 'include_vect', 'including_vect', 'inclusive_vect', 'inconsiderate_vect', 'indeed_vect', 'india_vect', 'indian_vect', 'indians_vect', 'indicate_vect', 'infections_vect', 'infernal_vect', 'info_vect', 'inform_vect', 'information_vect', 'informed_vect', 'innings_vect', 'insha_vect', 'inside_vect', 'instantly_vect', 'instead_vect', 'instructions_vect', 'insurance_vect', 'intelligent_vect', 'interest_vect', 'interested_vect', 'interesting_vect', 'interflora_vect', 'internet_vect', 'intro_vect', 'invest_vect', 'invite_vect', 'invited_vect', 'inviting_vect', 'iouri_vect', 'ip4_vect', 'ipod_vect', 'irritating_vect', 'iscoming_vect', 'ish_vect', 'island_vect', 'islands_vect', 'isnt_vect', 'issue_vect', 'issues_vect', 'it_vect', 'italian_vect', 'its_vect', 'itz_vect', 'itåõs_vect', 'ive_vect', 'iz_vect', 'izzit_vect', 'iåõm_vect', 'jacket_vect', 'jamster_vect', 'jan_vect', 'january_vect', 'jason_vect', 'java_vect', 'jay_vect', 'jazz_vect', 'jealous_vect', 'jeans_vect', 'jen_vect', 'jenny_vect', 'jess_vect', 'jesus_vect', 'jiayin_vect', 'jiu_vect', 'jo_vect', 'joanna_vect', 'job_vect', 'jogging_vect', 'john_vect', 'join_vect', 'joined_vect', 'joining_vect', 'joke_vect', 'jokes_vect', 'jokin_vect', 'joking_vect', 'jolly_vect', 'joy_vect', 'jsco_vect', 'jst_vect', 'juan_vect', 'juicy_vect', 'july_vect', 'june_vect', 'jus_vect', 'juz_vect', 'k52_vect', 'kadeem_vect', 'kaiez_vect', 'kallis_vect', 'kano_vect', 'kappa_vect', 'karaoke_vect', 'kate_vect', 'kay_vect', 'kb_vect', 'ke_vect', 'keep_vect', 'keeping_vect', 'keeps_vect', 'kent_vect', 'kept_vect', 'kerala_vect', 'key_vect', 'keys_vect', 'ki_vect', 'kick_vect', 'kid_vect', 'kidding_vect', 'kids_vect', 'kidz_vect', 'kind_vect', 'kinda_vect', 'kindly_vect', 'king_vect', 'kiss_vect', 'kisses_vect', 'kk_vect', 'knackered_vect', 'knew_vect', 'knock_vect', 'know_vect', 'knowing_vect', 'knows_vect', 'knw_vect', 'kz_vect', 'l8r_vect', 'la_vect', 'lab_vect', 'ladies_vect', 'lady_vect', 'lag_vect', 'laid_vect', 'land_vect', 'landline_vect', 'langport_vect', 'language_vect', 'laptop_vect', 
'lar_vect', 'largest_vect', 'last_vect', 'late_vect', 'later_vect', 'latest_vect', 'latr_vect', 'laugh_vect', 'laughing_vect', 'law_vect', 'lazy_vect', 'ldn_vect', 'ldnw15h_vect', 'le_vect', 'lead_vect', 'learn_vect', 'least_vect', 'leave_vect', 'leaves_vect', 'leaving_vect', 'lect_vect', 'lecture_vect', 'left_vect', 'legal_vect', 'legs_vect', 'leh_vect', 'lei_vect', 'lem_vect', 'less_vect', 'lesson_vect', 'lessons_vect', 'let_vect', 'lets_vect', 'letter_vect', 'letters_vect', 'liao_vect', 'library_vect', 'lick_vect', 'licks_vect', 'lido_vect', 'lie_vect', 'lies_vect', 'life_vect', 'lifetime_vect', 'lift_vect', 'light_vect', 'lik_vect', 'like_vect', 'liked_vect', 'likely_vect', 'likes_vect', 'lil_vect', 'line_vect', 'linerental_vect', 'lines_vect', 'link_vect', 'lion_vect', 'lionm_vect', 'lionp_vect', 'lions_vect', 'lip_vect', 'list_vect', 'listen_vect', 'listening_vect', 'literally_vect', 'little_vect', 'live_vect', 'liverpool_vect', 'living_vect', 'lk_vect', 'll_vect', 'lmao_vect', 'lo_vect', 'loads_vect', 'loan_vect', 'loans_vect', 'local_vect', 'locations_vect', 'lock_vect', 'log_vect', 'login_vect', 'logo_vect', 'logopic_vect', 'lol_vect', 'london_vect', 'lonely_vect', 'long_vect', 'longer_vect', 'look_vect', 'lookatme_vect', 'looked_vect', 'lookin_vect', 'looking_vect', 'looks_vect', 'lor_vect', 'lose_vect', 'losing_vect', |
'loss_vect', 'lost_vect', 'lot_vect', 'lotr_vect', 'lots_vect', 'lou_vect', 'loud_vect', 'lounge_vect', 'lousy_vect', 'lovable_vect', 'love_vect', 'loved_vect', 'lovely_vect', 'loveme_vect', 'lover_vect', 'loverboy_vect', 'lovers_vect', 'loves_vect', 'loving_vect', 'low_vect', 'lower_vect', 'loyal_vect', 'loyalty_vect', 'ls15hb_vect', 'lt_vect', 'ltd_vect', 'luck_vect', 'lucky_vect', 'lucy_vect', 'lunch_vect', 'luv_vect', 'lux_vect', 'luxury_vect', 'lyf_vect', 'lyfu_vect', 'lyk_vect', 'm227xy_vect', 'm26_vect', 'm263uz_vect', 'm8s_vect', 'mac_vect', 'machan_vect', 'macho_vect', 'mad_vect', 'madam_vect', 'made_vect', 'mag_vect', 'maga_vect', 'magical_vect', 'mah_vect', 'mail_vect', 'mailbox_vect', 'main_vect', 'maintain_vect', 'major_vect', 'make_vect', 'makes_vect', 'makin_vect', 'making_vect', 'malaria_vect', 'male_vect', 'mall_vect', 'man_vect', 'managed_vect', 'management_vect', 'many_vect', 'map_vect', 'march_vect', 'mark_vect', 'market_vect', 'marriage_vect', 'married_vect', 'marry_vect', 'masters_vect', 'match_vect', 'matches_vect', 'mate_vect', 'mates_vect', 'matrix3_vect', 'matter_vect', 'max10mins_vect', 'maximize_vect', 'maxå_vect', 'may_vect', 'mayb_vect', 'maybe_vect', 'mca_vect', 'mcat_vect', 'meal_vect', 'mean_vect', 'meaning_vect', 'means_vect', 'meant_vect', 'meanwhile_vect', 'med_vect', 'medical_vect', 'medicine_vect', 'meds_vect', 'meet_vect', 'meetin_vect', 'meeting_vect', 'meh_vect', 'mei_vect', 'member_vect', 'members_vect', 'men_vect', 'menu_vect', 'merry_vect', 'mess_vect', 'message_vect', 'messaged_vect', 'messages_vect', 'messaging_vect', 'met_vect', 'mid_vect', 'middle_vect', 'midnight_vect', 'mids_vect', 'might_vect', 'miles_vect', 'milk_vect', 'min_vect', 'mind_vect', 'mine_vect', 'mini_vect', 'minimum_vect', 'minor_vect', 'mins_vect', 'minute_vect', 'minutes_vect', 'minuts_vect', 'miracle_vect', 'mis_vect', 'miserable_vect', 'miss_vect', 'missed_vect', 'missin_vect', 'missing_vect', 'mistake_vect', 'mistakes_vect', 'mite_vect', 'mm_vect', 'mmm_vect', 'mmmm_vect', 'mmmmmm_vect', 'mnths_vect', 'mo_vect', 'moan_vect', 'mob_vect', 'mobile_vect', 'mobiles_vect', 'mobilesdirect_vect', 'mobilesvary_vect', 'mobileupd8_vect', 'mobno_vect', 'moby_vect', 'mode_vect', 'model_vect', 'module_vect', 'modules_vect', 'moji_vect', 'mojibiola_vect', 'mokka_vect', 'mom_vect', 'moment_vect', 'moments_vect', 'moms_vect', 'mon_vect', 'monday_vect', 'money_vect', 'monkeys_vect', 'mono_vect', 'month_vect', 'monthly_vect', 'months_vect', 'mood_vect', 'moon_vect', 'moral_vect', 'morn_vect', 'morning_vect', 'mostly_vect', 'mother_vect', 'motorola_vect', 'mouth_vect', 'move_vect', 'moved_vect', 'movie_vect', 'movies_vect', 'moving_vect', 'mp3_vect', 'mr_vect', 'mrng_vect', 'mrt_vect', 'msg_vect', 'msgs_vect', 'mths_vect', 'mu_vect', 'much_vect', 'mum_vect', 'mummy_vect', 'murder_vect', 'murdered_vect', 'murderer_vect', 'music_vect', 'must_vect', 'muz_vect', 'na_vect', 'nag_vect', 'nah_vect', 'naked_vect', 'name_vect', 'name1_vect', 'name2_vect', 'named_vect', 'names_vect', 'nan_vect', 'nap_vect', 'nasdaq_vect', 'nat_vect', 'national_vect', 'natural_vect', 'nature_vect', 'naughty_vect', 'nb_vect', 'nd_vect', 'ne_vect', 'near_vect', 'nearly_vect', 'necessarily_vect', 'necklace_vect', 'ned_vect', 'need_vect', 'needed_vect', 'needs_vect', 'neither_vect', 'net_vect', 'netcollex_vect', 'network_vect', 'networks_vect', 'neva_vect', 'never_vect', 'new_vect', 'newest_vect', 'news_vect', 'next_vect', 'ni8_vect', 'nice_vect', 'nigeria_vect', 'night_vect', 'nights_vect', 'nimya_vect', 'nite_vect', 
'no1_vect', 'nobody_vect', 'noe_vect', 'nokia_vect', 'nokias_vect', 'noline_vect', 'none_vect', 'noon_vect', 'nope_vect', 'norm_vect', 'norm150ptone_vect', 'normal_vect', 'normally_vect', 'northampton_vect', 'note_vect', 'nothin_vect', 'nothing_vect', 'notice_vect', 'noun_vect', 'nowadays_vect', 'nt_vect', 'ntt_vect', 'ntwk_vect', 'num_vect', 'number_vect', 'numbers_vect', 'nuther_vect', 'nvm_vect', 'nw_vect', 'nxt_vect', 'nyc_vect', 'nydc_vect', 'nyt_vect', 'o2_vect', 'obviously_vect', 'odi_vect', 'offer_vect', 'offers_vect', 'office_vect', 'official_vect', 'officially_vect', 'ofice_vect', 'often_vect', 'oh_vect', 'oi_vect', 'oic_vect', 'ok_vect', 'okay_vect', 'okey_vect', 'okie_vect', 'ola_vect', 'old_vect', 'omg_vect', 'omw_vect', 'one_vect', 'ones_vect', 'oni_vect', 'online_vect', 'onto_vect', 'onwards_vect', 'oooh_vect', 'oops_vect', 'open_vect', 'opening_vect', 'operator_vect', 'opinion_vect', 'opportunity_vect', 'opt_vect', 'option_vect', 'optout_vect', 'or2stoptxt_vect', 'orange_vect', 'oranges_vect', 'orchard_vect', 'order_vect', 'ordered_vect', 'oredi_vect', 'original_vect', 'oru_vect', 'os_vect', 'oso_vect', 'others_vect', 'otherwise_vect', 'otside_vect', 'outside_vect', 'outstanding_vect', 'outta_vect', 'ovulation_vect', 'oz_vect', 'pa_vect', 'pack_vect', 'package_vect', 'page_vect', 'pages_vect', 'paid_vect', 'pain_vect', 'painful_vect', 'painting_vect', 'panic_vect', 'paper_vect', 'papers_vect', 'paperwork_vect', 'parco_vect', 'parent_vect', 'parents_vect', 'paris_vect', 'park_vect', 'parked_vect', 'parking_vect', 'part_vect', 'partner_vect', 'partnership_vect', 'party_vect', 'pass_vect', 'passed_vect', 'password_vect', 'past_vect', 'pattern_vect', 'patty_vect', 'pay_vect', 'paying_vect', 'payment_vect', 'payoh_vect', 'pc_vect', 'peace_vect', 'pence_vect', 'people_vect', 'per_vect', 'perfect_vect', 'period_vect', 'person_vect', 'personal_vect', 'pete_vect', 'petey_vect', 'pg_vect', 'philosophy_vect', 'phoenix_vect', 'phone_vect', 'phones_vect', 'photo_vect', 'photos_vect', 'pic_vect', 'pick_vect', 'picked_vect', 'picking_vect', 'pics_vect', 'picsfree1_vect', 'picture_vect', 'pictures_vect', 'pie_vect', 'pieces_vect', 'pig_vect', 'pilates_vect', 'pin_vect', 'pink_vect', 'piss_vect', 'pissed_vect', 'pix_vect', 'pizza_vect', 'place_vect', 'places_vect', 'plan_vect', 'planned_vect', 'planning_vect', 'plans_vect', 'play_vect', 'played_vect', 'player_vect', 'players_vect', 'playing_vect', 'please_vect', 'pleased_vect', 'pleasure_vect', 'plenty_vect', 'pls_vect', 'plus_vect', 'plz_vect', 'pm_vect', 'po_vect', 'pobox_vect', 'pobox334_vect', 'pobox36504w45wq_vect', 'pobox45w2tg150p_vect', 'pobox84_vect', 'pod_vect', 'point_vect', 'points_vect', 'poker_vect', 'pole_vect', 'police_vect', 'politicians_vect', 'poly_vect', 'polyphonic_vect', 'polys_vect', 'pongal_vect', 'poor_vect', 'pop_vect', 'popped_vect', 'porn_vect', 'possession_vect', 'possible_vect', 'post_vect', 'postcard_vect', 'postcode_vect', 'posted_vect', 'posts_vect', 'potential_vect', 'potter_vect', 'pound_vect', 'pounds_vect', 'pouts_vect', 'power_vect', 'ppl_vect', 'pple_vect', 'ppm_vect', 'prabha_vect', 'practice_vect', 'practicing_vect', 'pray_vect', 'prefer_vect', 'premier_vect', 'prepare_vect', 'prescription_vect', 'present_vect', 'press_vect', 'pretty_vect', 'prey_vect', 'price_vect', 'prince_vect', 'princess_vect', 'print_vect', 'privacy_vect', 'private_vect', 'prize_vect', 'prob_vect', 'probably_vect', 'problem_vect', 'problems_vect', 'process_vect', 'processed_vect', 'prof_vect', 'profit_vect', 'program_vect', 
'project_vect', 'prolly_vect', 'promise_vect', 'promises_vect', 'promo_vect', 'proof_vect', 'properly_vect', 'prospects_vect', 'provided_vect', 'ps_vect', 'ptbo_vect', 'pub_vect', 'pull_vect', 'purchase_vect', 'purity_vect', 'purpose_vect', 'push_vect', 'pushes_vect', 'pussy_vect', 'put_vect', 'puttin_vect', 'putting_vect', 'qatar_vect', 'quality_vect', 'queen_vect', 'ques_vect', 'question_vect', 'questions_vect', 'quick_vect', 'quickly_vect', 'quiet_vect', 'quit_vect', 'quite_vect', 'quiz_vect', 'quote_vect', 'quoting_vect', 'racing_vect', 'radio_vect', 'railway_vect', 'rain_vect', 'raining_vect', 'raise_vect', 'rakhesh_vect', 'rally_vect', 'ran_vect', 'random_vect', 'randomly_vect', 'randy_vect', 'rang_vect', 'range_vect', 'ranjith_vect', 'rate_vect', 'rates_vect', 'rather_vect', 'rays_vect', 'rcvd_vect', 'rd_vect', 're_vect', 'reach_vect', 'reached_vect', 'reaching_vect', 'reaction_vect', 'read_vect', 'readers_vect', 'reading_vect', 'ready_vect', 'real_vect', 'realise_vect', 'reality_vect', 'realized_vect', 'really_vect', 'realy_vect', 'reason_vect', 'reasonable_vect', 'reasons_vect', 'reboot_vect', 'recd_vect', 'receipt_vect', 'receive_vect', 'received_vect', 'receiving_vect', 'recent_vect', 'recently_vect', 'recession_vect', 'record_vect', 'records_vect', 'recovery_vect', 'red_vect', 'ref_vect', 'reference_vect', 'reg_vect', 'regards_vect', 'register_vect', 'registered_vect', 'regret_vect', 'regular_vect', 'relation_vect', 'relax_vect', 'released_vect', 'rem_vect', 'remain_vect', 'remains_vect', 'rem |
ember_vect', 'remembered_vect', 'remembr_vect', 'remind_vect', 'reminder_vect', 'remove_vect', 'rent_vect', 'rental_vect', 'rentl_vect', 'repair_vect', 'repeat_vect', 'replied_vect', 'reply_vect', 'replying_vect', 'report_vect', 'representative_vect', 'request_vect', 'requests_vect', 'research_vect', 'resend_vect', 'respect_vect', 'respond_vect', 'responding_vect', 'response_vect', 'responsibility_vect', 'rest_vect', 'restaurant_vect', 'result_vect', 'results_vect', 'retrieve_vect', 'return_vect', 'returned_vect', 'returns_vect', 'reveal_vect', 'revealed_vect', 'review_vect', 'revision_vect', 'reward_vect', 'rhythm_vect', 'rice_vect', 'rich_vect', 'ride_vect', 'right_vect', 'rights_vect', 'ring_vect', 'ringtone_vect', 'ringtones_vect', 'rite_vect', 'river_vect', 'road_vect', 'roads_vect', 'roast_vect', 'rock_vect', 'rocks_vect', 'rofl_vect', 'roger_vect', 'role_vect', 'ron_vect', 'room_vect', 'roommate_vect', 'roommates_vect', 'rooms_vect', 'rose_vect', 'round_vect', 'row_vect', 'roww1j6hl_vect', 'roww1jhl_vect', 'royal_vect', 'rply_vect', 'rs_vect', 'rstm_vect', 'ru_vect', 'rub_vect', 'rude_vect', 'rule_vect', 'run_vect', 'running_vect', 'runs_vect', 'rush_vect', 'sacrifice_vect', 'sad_vect', 'sae_vect', 'safe_vect', 'said_vect', 'salam_vect', 'salary_vect', 'sale_vect', 'salon_vect', 'sam_vect', 'santa_vect', 'sar_vect', 'sarasota_vect', 'sarcastic_vect', 'sary_vect', 'sat_vect', 'sathya_vect', 'saturday_vect', 'savamob_vect', 'save_vect', 'saved_vect', 'saw_vect', 'say_vect', 'saying_vect', 'says_vect', 'scared_vect', 'scary_vect', 'sch_vect', 'schedule_vect', 'school_vect', 'schools_vect', 'science_vect', 'scold_vect', 'score_vect', 'scores_vect', 'scotland_vect', 'scream_vect', 'screaming_vect', 'scrounge_vect', 'se_vect', 'sea_vect', 'search_vect', 'searching_vect', 'season_vect', 'seat_vect', 'sec_vect', 'second_vect', 'seconds_vect', 'secret_vect', 'secretary_vect', 'secs_vect', 'sed_vect', 'see_vect', 'seeing_vect', 'seem_vect', 'seemed_vect', 'seems_vect', 'seen_vect', 'selected_vect', 'selection_vect', 'self_vect', 'sell_vect', 'selling_vect', 'sells_vect', 'sem_vect', 'semester_vect', 'sen_vect', 'send_vect', 'sender_vect', 'sending_vect', 'sense_vect', 'sent_vect', 'sentence_vect', 'sept_vect', 'series_vect', 'serious_vect', 'seriously_vect', 'service_vect', 'services_vect', 'serving_vect', 'set_vect', 'setting_vect', 'settings_vect', 'settle_vect', 'settled_vect', 'seven_vect', 'several_vect', 'sex_vect', 'sexy_vect', 'sh_vect', 'sha_vect', 'shall_vect', 'share_vect', 'shd_vect', 'sheets_vect', 'shes_vect', 'shesil_vect', 'shining_vect', 'ship_vect', 'shipping_vect', 'shirt_vect', 'shirts_vect', 'shit_vect', 'shld_vect', 'shocking_vect', 'shoot_vect', 'shop_vect', 'shoppin_vect', 'shopping_vect', 'short_vect', 'shorter_vect', 'shortly_vect', 'shot_vect', 'shoving_vect', 'show_vect', 'shower_vect', 'showing_vect', 'shows_vect', 'shu_vect', 'shuhui_vect', 'shy_vect', 'si_vect', 'sick_vect', 'side_vect', 'sighs_vect', 'sight_vect', 'sign_vect', 'signing_vect', 'silence_vect', 'silent_vect', 'silver_vect', 'sim_vect', 'simple_vect', 'simply_vect', 'since_vect', 'sing_vect', 'singing_vect', 'single_vect', 'singles_vect', 'sipix_vect', 'sir_vect', 'sis_vect', 'sister_vect', 'sit_vect', 'site_vect', 'sitting_vect', 'situation_vect', 'siva_vect', 'six_vect', 'size_vect', 'sk3_vect', 'sk38xh_vect', 'skilgme_vect', 'skip_vect', 'sky_vect', 'skype_vect', 'skyped_vect', 'slap_vect', 'slave_vect', 'sleep_vect', 'sleepin_vect', 'sleeping_vect', 'sleepy_vect', 'slept_vect', 'slice_vect', 
'slide_vect', 'slightly_vect', 'slip_vect', 'slippers_vect', 'slo_vect', 'slots_vect', 'slow_vect', 'slowly_vect', 'small_vect', 'smashed_vect', 'smile_vect', 'smiles_vect', 'smiling_vect', 'smoke_vect', 'smoking_vect', 'sms_vect', 'smth_vect', 'sn_vect', 'snake_vect', 'snow_vect', 'social_vect', 'sofa_vect', 'soft_vect', 'software_vect', 'sol_vect', 'some1_vect', 'somebody_vect', 'someone_vect', 'somethin_vect', 'something_vect', 'sometimes_vect', 'somewhere_vect', 'song_vect', 'songs_vect', 'sony_vect', 'sonyericsson_vect', 'soon_vect', 'sooner_vect', 'sore_vect', 'sorry_vect', 'sort_vect', 'sorting_vect', 'sound_vect', 'sounds_vect', 'south_vect', 'sp_vect', 'space_vect', 'spanish_vect', 'speak_vect', 'speaking_vect', 'special_vect', 'specialcall_vect', 'specially_vect', 'speed_vect', 'spend_vect', 'spending_vect', 'spent_vect', 'spk_vect', 'spoke_vect', 'spoken_vect', 'spook_vect', 'sport_vect', 'sports_vect', 'spree_vect', 'spring_vect', 'sptv_vect', 'sry_vect', 'st_vect', 'staff_vect', 'stamps_vect', 'stand_vect', 'standard_vect', 'standing_vect', 'star_vect', 'staring_vect', 'start_vect', 'started_vect', 'starting_vect', 'starts_vect', 'starwars3_vect', 'statement_vect', 'station_vect', 'stay_vect', 'stayed_vect', 'staying_vect', 'std_vect', 'steam_vect', 'step_vect', 'steve_vect', 'stick_vect', 'sticky_vect', 'still_vect', 'stock_vect', 'stockport_vect', 'stomach_vect', 'stomps_vect', 'stones_vect', 'stop_vect', 'stopped_vect', 'stops_vect', 'store_vect', 'stores_vect', 'story_vect', 'str_vect', 'straight_vect', 'stranger_vect', 'street_vect', 'stress_vect', 'strike_vect', 'strong_vect', 'strongbuy_vect', 'stuck_vect', 'student_vect', 'study_vect', 'studying_vect', 'stuff_vect', 'stupid_vect', 'style_vect', 'stylish_vect', 'sub_vect', 'subpoly_vect', 'subs_vect', 'subscribe6gbpmnth_vect', 'subscribed_vect', 'subscriber_vect', 'subscription_vect', 'success_vect', 'successful_vect', 'successfully_vect', 'sucks_vect', 'sue_vect', 'sufficient_vect', 'suggest_vect', 'suite_vect', 'suits_vect', 'sum1_vect', 'summer_vect', 'sun_vect', 'sunday_vect', 'sunlight_vect', 'sunny_vect', 'sunshine_vect', 'suntec_vect', 'sup_vect', 'super_vect', 'superb_vect', 'superior_vect', 'supervisor_vect', 'supply_vect', 'support_vect', 'suppose_vect', 'supposed_vect', 'suprman_vect', 'sura_vect', 'sure_vect', 'surely_vect', 'surfing_vect', 'surprise_vect', 'surprised_vect', 'survey_vect', 'sux_vect', 'suzy_vect', 'sw7_vect', 'sw73ss_vect', 'sweet_vect', 'swing_vect', 'system_vect', 'ta_vect', 'tablets_vect', 'tahan_vect', 'take_vect', 'taken_vect', 'takes_vect', 'takin_vect', 'taking_vect', 'talent_vect', 'talk_vect', 'talking_vect', 'tampa_vect', 'tape_vect', 'tariffs_vect', 'tat_vect', 'taunton_vect', 'taylor_vect', 'tb_vect', 'tc_vect', 'tcrw1_vect', 'tcs_vect', 'tea_vect', 'teach_vect', 'teacher_vect', 'teaches_vect', 'team_vect', 'tear_vect', 'tease_vect', 'teasing_vect', 'tech_vect', 'technical_vect', 'tee_vect', 'teeth_vect', 'tel_vect', 'telephone_vect', 'tell_vect', 'telling_vect', 'tells_vect', 'telugu_vect', 'temple_vect', 'ten_vect', 'tenants_vect', 'tenerife_vect', 'tension_vect', 'term_vect', 'terms_vect', 'terrible_vect', 'test_vect', 'testing_vect', 'text_vect', 'texted_vect', 'texting_vect', 'textoperator_vect', 'texts_vect', 'th_vect', 'thangam_vect', 'thank_vect', 'thanks_vect', 'thanksgiving_vect', 'thanx_vect', 'that_vect', 'thats_vect', 'thatåõs_vect', 'the_vect', 'theatre_vect', 'themob_vect', 'theory_vect', 'thesis_vect', 'thgt_vect', 'thing_vect', 'things_vect', 'think_vect', 
'thinkin_vect', 'thinking_vect', 'thinks_vect', 'thk_vect', 'thnk_vect', 'tho_vect', 'though_vect', 'thought_vect', 'three_vect', 'throat_vect', 'throw_vect', 'thru_vect', 'tht_vect', 'thts_vect', 'thurs_vect', 'thursday_vect', 'tick_vect', 'ticket_vect', 'tickets_vect', 'tight_vect', 'tihs_vect', 'til_vect', 'till_vect', 'time_vect', 'times_vect', 'timing_vect', 'tired_vect', 'tirunelvali_vect', 'tirupur_vect', 'tissco_vect', 'tkts_vect', 'tm_vect', 'tming_vect', 'tmobile_vect', 'tmr_vect', 'tncs_vect', 'toa_vect', 'toclaim_vect', 'today_vect', 'todays_vect', 'tog_vect', 'together_vect', 'tok_vect', 'told_vect', 'tomarrow_vect', 'tomo_vect', 'tomorrow_vect', 'tone_vect', 'tones_vect', 'tones2youcouk_vect', 'tonight_vect', 'tonite_vect', 'took_vect', 'tool_vect', 'tooo_vect', 'toot_vect', 'top_vect', 'topic_vect', 'torch_vect', 'toshiba_vect', 'tot_vect', 'total_vect', 'totally_vect', 'touch_vect', 'tough_vect', 'tour_vect', 'towards_vect', 'town_vect', 'track_vect', 'trade_vect', 'traffic_vect', 'train_vect', 'training_vect', 'transaction_vect', 'transfer_vect', 'transport_vect', 'travel_vect', 'treat_vect', 'treated_vect', 'tried_vect', 'trip_vect', 'trips_vect', 'trouble_vect', 'true_vect', 'truffles_vect', 'truly_vect', 'trust_vect', 'truth_vect', 'try_vect', 'trying_vect', 'ts_vect', 'tscs_vect', 'tscs087147403231winawk_vect', 'tt_vect', 'ttyl_vect', 'tues_vect', 'tuesday_vect', 'tuition_vect', 'turn_vect', 'turning_vect', 'turns_vect', 'tv_vect', 'twelve_vect', 'twice_vect', 'two_vect', 'txt_vect', 'txtauction_vect', 'txtin_vect', 'txting_vect', 'txtno_vect', 'txts_ |
vect', 'txtstop_vect', 'tyler_vect', 'type_vect', 'tyrone_vect', 'u4_vect', 'ubi_vect', 'ufind_vect', 'ugh_vect', 'uh_vect', 'uk_vect', 'uks_vect', 'ultimatum_vect', 'umma_vect', 'unable_vect', 'uncle_vect', 'understand_vect', 'understanding_vect', 'understood_vect', 'underwear_vect', 'unemployed_vect', 'uni_vect', 'unique_vect', 'university_vect', 'unless_vect', 'unlimited_vect', 'unnecessarily_vect', 'unredeemed_vect', 'unsold_vect', 'unsub_vect', 'unsubscribe_vect', 'upd8_vect', 'update_vect', 'updatenow_vect', 'upgrade_vect', 'upload_vect', 'upset_vect', 'upstairs_vect', 'ur_vect', 'ure_vect', 'urgent_vect', 'urgnt_vect', 'url_vect', 'urn_vect', 'urself_vect', 'us_vect', 'usb_vect', 'use_vect', 'used_vect', 'user_vect', 'usf_vect', 'using_vect', 'usual_vect', 'usually_vect', 'vale_vect', 'valentine_vect', 'valentines_vect', 'valid_vect', 'valid12hrs_vect', 'valuable_vect', 'value_vect', 'valued_vect', 'vary_vect', 've_vect', 'vegas_vect', 'verify_vect', 'version_vect', 'via_vect', 'vid_vect', 'video_vect', 'videochat_vect', 'videophones_vect', 'vijay_vect', 'vikky_vect', 'village_vect', 'violated_vect', 'violence_vect', 'vip_vect', 'virgin_vect', 'visit_vect', 'vivek_vect', 'vl_vect', 'voda_vect', 'vodafone_vect', 'vodka_vect', 'voice_vect', 'voicemail_vect', 'vomit_vect', 'vote_vect', 'voucher_vect', 'vouchers_vect', 'vry_vect', 'vth_vect', 'w45wq_vect', 'wa_vect', 'wah_vect', 'wait_vect', 'waited_vect', 'waitin_vect', 'waiting_vect', 'wake_vect', 'waking_vect', 'wales_vect', 'walk_vect', 'walked_vect', 'walking_vect', 'walmart_vect', 'wan_vect', 'wana_vect', 'want_vect', 'wanted_vect', 'wanting_vect', 'wants_vect', 'wap_vect', 'warm_vect', 'warner_vect', 'waste_vect', 'wasted_vect', 'wat_vect', 'watch_vect', 'watching_vect', 'water_vect', 'wats_vect', 'way_vect', 'wc1n3xx_vect', 'we_vect', 'weak_vect', 'wear_vect', 'wearing_vect', 'weather_vect', 'web_vect', 'website_vect', 'wed_vect', 'wedding_vect', 'wednesday_vect', 'wee_vect', 'weed_vect', 'week_vect', 'weekend_vect', 'weekends_vect', 'weekly_vect', 'weeks_vect', 'weigh_vect', 'weight_vect', 'weird_vect', 'welcome_vect', 'well_vect', 'welp_vect', 'wen_vect', 'went_vect', 'west_vect', 'wet_vect', 'what_vect', 'whatever_vect', 'whats_vect', 'whenever_vect', 'whenevr_vect', 'wherever_vect', 'whether_vect', 'white_vect', 'whn_vect', 'whole_vect', 'whos_vect', 'whose_vect', 'wid_vect', 'widelivecomindex_vect', 'wif_vect', 'wife_vect', 'wil_vect', 'willing_vect', 'win_vect', 'wind_vect', 'wine_vect', 'winner_vect', 'winning_vect', 'wins_vect', 'wipro_vect', 'wisdom_vect', 'wise_vect', 'wish_vect', 'wishes_vect', 'wishing_vect', 'wit_vect', 'within_vect', 'without_vect', 'wiv_vect', 'wk_vect', 'wkend_vect', 'wkg_vect', 'wkly_vect', 'wks_vect', 'wld_vect', 'wml_vect', 'wn_vect', 'wnt_vect', 'wo_vect', 'woke_vect', 'woken_vect', 'woman_vect', 'women_vect', 'wonder_vect', 'wonderful_vect', 'wondering_vect', 'wont_vect', 'woot_vect', 'word_vect', 'words_vect', 'work_vect', 'workin_vect', 'working_vect', 'works_vect', 'world_vect', 'worried_vect', 'worries_vect', 'worry_vect', 'worse_vect', 'worst_vect', 'worth_vect', 'wot_vect', 'would_vect', 'wow_vect', 'write_vect', 'wrong_vect', 'wtf_vect', 'wud_vect', 'wuld_vect', 'wun_vect', 'www4tcbiz_vect', 'wwwcomuknet_vect', 'wwwetlpcoukexpressoffer_vect', 'wwwgetzedcouk_vect', 'wwwldewcom_vect', 'wwwldewcom1win150ppmx3age16_vect', 'wwwmovietriviatv_vect', 'wwwringtonescouk_vect', 'wwwsmsconet_vect', 'wwwtxttowincouk_vect', 'wwwurawinnercom_vect', 'wylie_vect', 'xchat_vect', 'xmas_vect', 
'xuhui_vect', 'xx_vect', 'xxx_vect', 'xxxx_vect', 'xxxxx_vect', 'xy_vect', 'ya_vect', 'yahoo_vect', 'yan_vect', 'yar_vect', 'yay_vect', 'yck_vect', 'yeah_vect', 'year_vect', 'years_vect', 'yelling_vect', 'yellow_vect', 'yep_vect', 'yes_vect', 'yest_vect', 'yesterday_vect', 'yet_vect', 'yetunde_vect', 'yijue_vect', 'ym_vect', 'yo_vect', 'yoga_vect', 'yogasana_vect', 'yor_vect', 'you_vect', 'yr_vect', 'yrs_vect', 'yummy_vect', 'yun_vect', 'yuo_vect', 'yup_vect', 'zed_vect', 'zindgi_vect', 'ìï_vect', 'ûò_vect'], 'input_features_type': {'v2': 'O'}, 'word2num_features': [], 'unpreprocessed_columns': [], 'force_numeric_conv': [], 'conversion_method': 'TF_IDF'}, 'selector': {'reducer': False, 'reducer_file': '', 'input_features': ['v2'], 'output_features': ['07xxxxxxxxx_vect', '08700621170150p_vect', '08702840625comuk_vect', '08718726270150gbpmtmsg18_vect', '1000s_vect', '10am7pm_vect', '10k_vect', '10p_vect', '10pmin_vect', '10ppm_vect', '11mths_vect', '125gift_vect', '12hrs_vect', '12mths_vect', '150p_vect', '150perwksub_vect', '150pm_vect', '150pmin_vect', '150pmsg_vect', '150pmsgrcvdhgsuite3422landsroww1j6hl_vect', '150pmtmsgrcvd18_vect', '150ppm_vect', '150ptone_vect', '150pwk_vect', '150week_vect', '16only_vect', '18only_vect', '1hr_vect', '1minmobsmorelkpobox177hp51fl_vect', '1st_vect', '1x150pwk_vect', '20p_vect', '20pmin_vect', '21st_vect', '220cm2_vect', '24hrs_vect', '25p_vect', '26th_vect', '2day_vect', '2find_vect', '2geva_vect', '2go_vect', '2marrow_vect', '2mrw_vect', '2nd_vect', '2nite_vect', '2optout_vect', '2p_vect', '2u_vect', '2waxsto_vect', '2wks_vect', '300p_vect', '31pmsg_vect', '3510i_vect', '3d_vect', '3g_vect', '3gbp_vect', '3hrs_vect', '3mins_vect', '3qxj9_vect', '3rd_vect', '3ss_vect', '3u_vect', '3uz_vect', '3wk_vect', '40gb_vect', '4a_vect', '4d_vect', '4eva_vect', '4get_vect', '4info_vect', '4mths_vect', '4th_vect', '4u_vect', '50p_vect', '5min_vect', '5pm_vect', '5wb_vect', '5we_vect', '60pmin_vect', '6hrs_vect', '6months_vect', '6pm_vect', '7250i_vect', '7ish_vect', '8am_vect', '8pm_vect', '8th_vect', '8wp_vect', '9ae_vect', '9ja_vect', '9pm_vect', '9t_vect', 'aathi_vect', 'abi_vect', 'ability_vect', 'abiola_vect', 'able_vect', 'abt_vect', 'abta_vect', 'aburo_vect', 'ac_vect', 'academic_vect', 'acc_vect', 'accept_vect', 'access_vect', 'accident_vect', 'accidentally_vect', 'accordingly_vect', 'account_vect', 'ache_vect', 'across_vect', 'acted_vect', 'action_vect', 'activate_vect', 'activities_vect', 'actor_vect', 'actual_vect', 'actually_vect', 'ad_vect', 'adam_vect', 'add_vect', 'added_vect', 'addicted_vect', 'addie_vect', 'address_vect', 'admin_vect', 'administrator_vect', 'admirer_vect', 'admit_vect', 'adore_vect', 'adoring_vect', 'ads_vect', 'adult_vect', 'advance_vect', 'adventure_vect', 'advice_vect', 'advise_vect', 'affair_vect', 'affairs_vect', 'affectionate_vect', 'afraid_vect', 'aft_vect', 'afternoon_vect', 'aftr_vect', 'agalla_vect', 'age_vect', 'age16_vect', 'ages_vect', 'ago_vect', 'agree_vect', 'ah_vect', 'aha_vect', 'ahead_vect', 'ahmad_vect', 'ai_vect', 'aight_vect', 'aint_vect', 'air_vect', 'airport_vect', 'airtel_vect', 'aiya_vect', 'aiyah_vect', 'aiyar_vect', 'aiyo_vect', 'al_vect', 'album_vect', 'alert_vect', 'alex_vect', 'alfie_vect', 'ali_vect', 'allah_vect', 'allow_vect', 'allowed_vect', 'almost_vect', 'alone_vect', 'along_vect', 'already_vect', 'alright_vect', 'alrite_vect', 'also_vect', 'always_vect', 'alwys_vect', 'amazing_vect', 'american_vect', 'among_vect', 'amount_vect', 'amp_vect', 'amt_vect', 'andros_vect', 'angry_vect', 
'annie_vect', 'anniversary_vect', 'announcement_vect', 'anot_vect', 'another_vect', 'ans_vect', 'ansr_vect', 'answer_vect', 'answered_vect', 'answering_vect', 'answers_vect', 'anthony_vect', 'anti_vect', 'anybody_vect', 'anymore_vect', 'anyone_vect', 'anything_vect', 'anytime_vect', 'anyway_vect', 'anyways_vect', 'apartment_vect', 'app_vect', 'apparently_vect', 'applebees_vect', 'apply_vect', 'appointment_vect', 'appreciate_vect', 'appreciated_vect', 'approx_vect', 'apps_vect', 'appt_vect', 'april_vect', 'ar_vect', 'arcade_vect', 'ard_vect', 'area_vect', 'argh_vect', 'argument_vect', 'arm_vect', 'armand_vect', 'arms_vect', 'around_vect', 'arrange_vect', 'arrested_vect', 'arrive_vect', 'arsenal_vect', 'art_vect', 'arun_vect', 'asap_vect', 'ashley_vect', 'ask_vect', 'askd_vect', 'asked_vect', 'askin_vect', 'asking_vect', 'asks_vect', 'asleep_vect', 'ass_vect', 'assume_vect', 'ate_vect', 'atlanta_vect', 'atlast_vect', 'atm_vect', 'attached_vect', 'attempt_vect', 'attend_vect', 'auction_vect', 'august_vect', 'aunt_vect', 'aunty_vect', 'auto_vect', 'av_vect', 'available_vect', 'avatar_vect', 'ave_vect', 'avent_vect', |
'avoid_vect', 'await_vect', 'awaiting_vect', 'awake_vect', 'award_vect', 'awarded_vect', 'away_vect', 'awesome_vect', 'aww_vect', 'b4_vect', 'ba_vect', 'babe_vect', 'babes_vect', 'babies_vect', 'baby_vect', 'back_vect', 'bad_vect', 'bag_vect', 'bags_vect', 'bahamas_vect', 'bak_vect', 'balance_vect', 'bank_vect', 'banks_vect', 'bar_vect', 'barely_vect', 'basic_vect', 'basically_vect', 'bat_vect', 'bath_vect', 'bathe_vect', 'bathing_vect', 'battery_vect', 'bay_vect', 'bb_vect', 'bc_vect', 'bck_vect', 'bcoz_vect', 'bday_vect', 'be_vect', 'bears_vect', 'beautiful_vect', 'beauty_vect', 'bec_vect', 'become_vect', 'becoz_vect', 'bed_vect', 'bedrm_vect', 'bedroom_vect', 'beer_vect', 'befor_vect', 'beg_vect', 'begin_vect', 'behave_vect', 'behind_vect', 'bein_vect', 'believe_vect', 'bell_vect', 'belly_vect', 'belovd_vect', 'best_vect', 'bet_vect', 'better_vect', 'beyond_vect', 'bf_vect', 'bid_vect', 'bids_vect', 'big_vect', 'bigger_vect', 'biggest_vect', 'bill_vect', 'billed_vect', 'billion_vect', 'bills_vect', 'bin_vect', 'biola_vect', 'birds_vect', 'birla_vect', 'birth_vect', 'birthdate_vect', 'birthday_vect', 'bishan_vect', 'bit_vect', 'bitch_vect', 'bite_vect', 'black_vect', 'blackberry_vect', 'blah_vect', 'blake_vect', 'blank_vect', 'bleh_vect', 'bless_vect', 'blessing_vect', 'bloo_vect', 'blood_vect', 'bloody_vect', 'blue_vect', 'bluetooth_vect', 'bluff_vect', 'boat_vect', 'body_vect', 'bold_vect', 'bone_vect', 'bonus_vect', 'boo_vect', 'book_vect', 'booked_vect', 'booking_vect', 'books_vect', 'boost_vect', 'booty_vect', 'bored_vect', 'boring_vect', 'born_vect', 'boss_vect', 'boston_vect', 'bother_vect', 'bottom_vect', 'bought_vect', 'bout_vect', 'bowl_vect', 'box_vect', 'box326_vect', 'box334sk38ch_vect', 'box97n7qp_vect', 'boy_vect', 'boye_vect', 'boyfriend_vect', 'boys_vect', 'boytoy_vect', 'brah_vect', 'brand_vect', 'bread_vect', 'break_vect', 'breathe_vect', 'bright_vect', 'brilliant_vect', 'bring_vect', 'bringing_vect', 'brings_vect', 'british_vect', 'bro_vect', 'broad_vect', 'broke_vect', 'broken_vect', 'bros_vect', 'brothas_vect', 'brother_vect', 'brought_vect', 'bruv_vect', 'bslvyl_vect', 'bt_vect', 'btnationalrate_vect', 'btw_vect', 'bucks_vect', 'bud_vect', 'budget_vect', 'buff_vect', 'buffet_vect', 'bugis_vect', 'building_vect', 'buns_vect', 'burger_vect', 'burns_vect', 'bus_vect', 'buses_vect', 'business_vect', 'busy_vect', 'butt_vect', 'buy_vect', 'buying_vect', 'buzz_vect', 'bx420_vect', 'bx420ip45we_vect', 'bye_vect', 'ca_vect', 'cabin_vect', 'cafe_vect', 'cake_vect', 'cal_vect', 'calculation_vect', 'calicut_vect', 'california_vect', 'call_vect', 'call2optout674_vect', 'callback_vect', 'callcost_vect', 'called_vect', 'caller_vect', 'callers_vect', 'callertune_vect', 'callin_vect', 'calling_vect', 'calls_vect', 'callså_vect', 'cam_vect', 'camcorder_vect', 'came_vect', 'camera_vect', 'cameravideo_vect', 'campus_vect', 'can_vect', 'canada_vect', 'canal_vect', 'canary_vect', 'cancel_vect', 'cancelled_vect', 'cancer_vect', 'cant_vect', 'captain_vect', 'car_vect', 'card_vect', 'cardiff_vect', 'care_vect', 'cared_vect', 'career_vect', 'careful_vect', 'carefully_vect', 'caring_vect', 'carlos_vect', 'caroline_vect', 'cars_vect', 'cartoon_vect', 'case_vect', 'cash_vect', 'cashbalance_vect', 'cashin_vect', 'castor_vect', 'cat_vect', 'catch_vect', 'catching_vect', 'caught_vect', 'cause_vect', 'cbe_vect', 'cc_vect', 'cd_vect', 'cdgt_vect', 'cds_vect', 'celebrate_vect', 'celebration_vect', 'cell_vect', 'center_vect', 'centre_vect', 'certainly_vect', 'cha_vect', 'chain_vect', 
'challenge_vect', 'chance_vect', 'change_vect', 'changed_vect', 'changes_vect', 'channel_vect', 'character_vect', 'charge_vect', 'charged_vect', 'charges_vect', 'charity_vect', 'charles_vect', 'chase_vect', 'chasing_vect', 'chat_vect', 'chatting_vect', 'cheap_vect', 'cheaper_vect', 'cheat_vect', 'chechi_vect', 'check_vect', 'checked_vect', 'checking_vect', 'cheers_vect', 'chennai_vect', 'cherish_vect', 'chest_vect', 'chicken_vect', 'chikku_vect', 'child_vect', 'childish_vect', 'children_vect', 'chill_vect', 'chillin_vect', 'china_vect', 'chinese_vect', 'chip_vect', 'chocolate_vect', 'choice_vect', 'choose_vect', 'chosen_vect', 'christ_vect', 'christmas_vect', 'church_vect', 'cine_vect', 'cinema_vect', 'citizen_vect', 'city_vect', 'claim_vect', 'claims_vect', 'claire_vect', 'class_vect', 'classes_vect', 'clean_vect', 'cleaning_vect', 'clear_vect', 'clearly_vect', 'click_vect', 'clock_vect', 'close_vect', 'closed_vect', 'closer_vect', 'closes_vect', 'clothes_vect', 'club_vect', 'cn_vect', 'co_vect', 'coast_vect', 'coat_vect', 'cochin_vect', 'code_vect', 'coffee_vect', 'coin_vect', 'coins_vect', 'cold_vect', 'colleagues_vect', 'collect_vect', 'collected_vect', 'collecting_vect', 'collection_vect', 'college_vect', 'colour_vect', 'come_vect', 'comedy_vect', 'comes_vect', 'comin_vect', 'coming_vect', 'commercial_vect', 'common_vect', 'community_vect', 'comp_vect', 'company_vect', 'competition_vect', 'complete_vect', 'completed_vect', 'completely_vect', 'complimentary_vect', 'computer_vect', 'concentrate_vect', 'concert_vect', 'conditions_vect', 'conducts_vect', 'confidence_vect', 'confirm_vect', 'congrats_vect', 'congratulations_vect', 'connection_vect', 'consider_vect', 'considering_vect', 'constant_vect', 'constantly_vect', 'contact_vect', 'contacted_vect', 'contacts_vect', 'content_vect', 'contents_vect', 'continue_vect', 'contract_vect', 'control_vect', 'convey_vect', 'convinced_vect', 'cool_vect', 'coping_vect', 'copy_vect', 'cornwall_vect', 'correct_vect', 'cos_vect', 'cost_vect', 'costa_vect', 'costs_vect', 'costå_vect', 'could_vect', 'count_vect', 'countin_vect', 'country_vect', 'couple_vect', 'course_vect', 'cover_vect', 'coz_vect', 'cr9_vect', 'cramps_vect', 'crave_vect', 'crazy_vect', 'created_vect', 'credit_vect', 'credited_vect', 'credits_vect', 'creepy_vect', 'crisis_vect', 'crore_vect', 'cross_vect', 'croydon_vect', 'cruise_vect', 'cry_vect', 'cs_vect', 'csbcm4235wc1n3xx_vect', 'csstop_vect', 'cud_vect', 'cuddling_vect', 'cum_vect', 'cup_vect', 'curious_vect', 'current_vect', 'currently_vect', 'cust_vect', 'custcare_vect', 'custcare08718720201_vect', 'custom_vect', 'customer_vect', 'customers_vect', 'cut_vect', 'cute_vect', 'cutting_vect', 'cuz_vect', 'cw25wx_vect', 'da_vect', 'dad_vect', 'daddy_vect', 'daily_vect', 'damn_vect', 'dance_vect', 'dancing_vect', 'dare_vect', 'dark_vect', 'darlin_vect', 'darling_vect', 'darren_vect', 'dat_vect', 'date_vect', 'dates_vect', 'dating_vect', 'dave_vect', 'day_vect', 'days_vect', 'de_vect', 'dead_vect', 'deal_vect', 'dealer_vect', 'dealing_vect', 'dear_vect', 'dearly_vect', 'death_vect', 'decide_vect', 'decided_vect', 'decimal_vect', 'decision_vect', 'deep_vect', 'def_vect', 'definite_vect', 'definitely_vect', 'del_vect', 'delete_vect', 'deleted_vect', 'delhi_vect', 'deliver_vect', 'delivered_vect', 'deliveredtomorrow_vect', 'delivery_vect', 'dem_vect', 'demand_vect', 'den_vect', 'denis_vect', 'department_vect', 'depends_vect', 'depressed_vect', 'derek_vect', 'desires_vect', 'desperate_vect', 'details_vect', 'dey_vect', 'dhoni_vect', 
'dial_vect', 'dick_vect', 'dictionary_vect', 'didn_vect', 'didnt_vect', 'didt_vect', 'die_vect', 'died_vect', 'diet_vect', 'different_vect', 'difficult_vect', 'digital_vect', 'dignity_vect', 'din_vect', 'dinner_vect', 'dint_vect', 'direct_vect', 'directly_vect', 'dirty_vect', 'dis_vect', 'discount_vect', 'discuss_vect', 'dislikes_vect', 'display_vect', 'distance_vect', 'distract_vect', 'disturb_vect', 'division_vect', 'dload_vect', 'dnt_vect', 'doc_vect', 'docs_vect', 'doctor_vect', 'doesnt_vect', 'dog_vect', 'dogging_vect', 'doggy_vect', 'doin_vect', 'dollars_vect', 'don_vect', 'done_vect', 'dont_vect', 'donåõt_vect', 'door_vect', 'dorm_vect', 'double_vect', 'dough_vect', 'download_vect', 'downloads_vect', 'draw_vect', 'dream_vect', 'dreams_vect', 'dress_vect', 'dressed_vect', 'dresser_vect', 'drink_vect', 'drinking_vect', 'drinks_vect', 'drive_vect', 'driver_vect', 'drivin_vect', 'driving_vect', 'drop_vect', 'dropped_vect', 'drug_vect', 'drugs_vect', 'drunk_vect', 'dry_vect', 'ds_vect', 'dubsack_vect', 'dude_vect', 'due_vect', 'dun_vect', 'dunno_vect', 'durban_vect', 'dvd_vect', 'earlier_vect', 'early_vect', 'earth_vect', 'easier_vect', 'easily_vect', 'east_vect', 'easter_vect', 'easy_vect', 'eat_vect', 'eaten_vect', 'eatin_vect', 'eating_vect', 'ebay_vect', 'ec2a_vect', 'ee_vect', 'eek_vect', 'eerie_vect', 'effects_vect', 'eg_vect', 'egg_vect', 'eggs_vect', 'eh_vect', 'eight_vect', 'either_vect', 'ela_vect', 'electricity_vect', 'else_vect', 'elsewhere_vect', 'em_vect', 'email_vect', 'embarassed_vect', 'empty_vect', 'end_vect', 'ended_vect', 'ending_vect', 'ends_vect', 'enemy_vect', 'energy_vect', |
'eng_vect', 'engin_vect', 'england_vect', 'english_vect', 'enjoy_vect', 'enjoyed_vect', 'enough_vect', 'enter_vect', 'entered_vect', 'entitled_vect', 'entry_vect', 'enuff_vect', 'envelope_vect', 'er_vect', 'erm_vect', 'escape_vect', 'especially_vect', 'esplanade_vect', 'eta_vect', 'etc_vect', 'euro_vect', 'euro2004_vect', 'europe_vect', 'eve_vect', 'eveb_vect', 'even_vect', 'evening_vect', 'event_vect', 'ever_vect', 'every_vect', 'everybody_vect', 'everyday_vect', 'everyone_vect', 'everything_vect', 'everywhere_vect', 'evn_vect', 'evng_vect', 'ex_vect', 'exact_vect', 'exactly_vect', 'exam_vect', 'exams_vect', 'excellent_vect', 'except_vect', 'exciting_vect', 'excuse_vect', 'excuses_vect', 'executive_vect', 'exeter_vect', 'exhausted_vect', 'expect_vect', 'expecting_vect', 'expensive_vect', 'experience_vect', 'expired_vect', 'expires_vect', 'explain_vect', 'explicit_vect', 'explosive_vect', 'express_vect', 'extra_vect', 'eye_vect', 'eyes_vect', 'fa_vect', 'fab_vect', 'face_vect', 'facebook_vect', 'fact_vect', 'faggy_vect', 'failed_vect', 'fair_vect', 'faith_vect', 'fall_vect', 'falls_vect', 'family_vect', 'fan_vect', 'fancies_vect', 'fancy_vect', 'fantasies_vect', 'fantastic_vect', 'fantasy_vect', 'far_vect', 'farm_vect', 'fast_vect', 'faster_vect', 'fat_vect', 'father_vect', 'fathima_vect', 'fault_vect', 'fave_vect', 'favorite_vect', 'favour_vect', 'favourite_vect', 'fb_vect', 'feb_vect', 'february_vect', 'feel_vect', 'feelin_vect', 'feeling_vect', 'feels_vect', 'fees_vect', 'feet_vect', 'fell_vect', 'felt_vect', 'fetch_vect', 'fever_vect', 'field_vect', 'fifteen_vect', 'fight_vect', 'fighting_vect', 'figure_vect', 'file_vect', 'files_vect', 'fill_vect', 'filling_vect', 'fills_vect', 'film_vect', 'final_vect', 'finally_vect', 'find_vect', 'fine_vect', 'fingers_vect', 'finish_vect', 'finished_vect', 'first_vect', 'fit_vect', 'fix_vect', 'fixed_vect', 'flag_vect', 'flaked_vect', 'flash_vect', 'flat_vect', 'flight_vect', 'flights_vect', 'flirt_vect', 'floor_vect', 'flower_vect', 'fml_vect', 'fo_vect', 'follow_vect', 'followed_vect', 'following_vect', 'fone_vect', 'food_vect', 'fool_vect', 'football_vect', 'force_vect', 'foreign_vect', 'forever_vect', 'forevr_vect', 'forget_vect', 'forgets_vect', 'forgiven_vect', 'forgot_vect', 'format_vect', 'forums_vect', 'forward_vect', 'forwarded_vect', 'found_vect', 'four_vect', 'fr_vect', 'fran_vect', 'freak_vect', 'free_vect', 'freefone_vect', 'freemsg_vect', 'freephone_vect', 'freezing_vect', 'fren_vect', 'frens_vect', 'fret_vect', 'fri_vect', 'friday_vect', 'friend_vect', 'friends_vect', 'friendship_vect', 'fringe_vect', 'frm_vect', 'frnd_vect', 'frnds_vect', 'frndship_vect', 'fuck_vect', 'fuckin_vect', 'fucking_vect', 'ful_vect', 'full_vect', 'fullonsmscom_vect', 'fun_vect', 'funny_vect', 'future_vect', 'fyi_vect', 'gal_vect', 'gals_vect', 'game_vect', 'games_vect', 'gang_vect', 'gap_vect', 'gaps_vect', 'garage_vect', 'garbage_vect', 'gary_vect', 'gas_vect', 'gautham_vect', 'gave_vect', 'gay_vect', 'gd_vect', 'ge_vect', 'gee_vect', 'geeee_vect', 'geeeee_vect', 'gender_vect', 'generally_vect', 'genius_vect', 'gentle_vect', 'gentleman_vect', 'gently_vect', 'germany_vect', 'get_vect', 'gets_vect', 'gettin_vect', 'getting_vect', 'gf_vect', 'gibbs_vect', 'gift_vect', 'gim_vect', 'girl_vect', 'girls_vect', 'gist_vect', 'giv_vect', 'give_vect', 'given_vect', 'gives_vect', 'giving_vect', 'glad_vect', 'gn_vect', 'go_vect', 'goal_vect', 'god_vect', 'goes_vect', 'goin_vect', 'going_vect', 'gold_vect', 'gon_vect', 'gona_vect', 'gone_vect', 'good_vect', 
'goodmorning_vect', 'goodnight_vect', 'goodnite_vect', 'google_vect', 'gorgeous_vect', 'gossip_vect', 'got_vect', 'goto_vect', 'gotten_vect', 'govtinstituitions_vect', 'gr8_vect', 'grace_vect', 'gram_vect', 'grand_vect', 'granite_vect', 'gravity_vect', 'great_vect', 'green_vect', 'greet_vect', 'greetings_vect', 'grins_vect', 'grl_vect', 'ground_vect', 'group_vect', 'gt_vect', 'guaranteed_vect', 'gud_vect', 'gudnite_vect', 'guess_vect', 'guessing_vect', 'guide_vect', 'guilty_vect', 'guy_vect', 'guys_vect', 'gym_vect', 'ha_vect', 'haf_vect', 'haha_vect', 'hai_vect', 'hair_vect', 'haiz_vect', 'half_vect', 'halloween_vect', 'ham_vect', 'hand_vect', 'handed_vect', 'handle_vect', 'hands_vect', 'handset_vect', 'hanging_vect', 'happen_vect', 'happened_vect', 'happening_vect', 'happens_vect', 'happiness_vect', 'happy_vect', 'hard_vect', 'hardcore_vect', 'hardly_vect', 'harry_vect', 'hate_vect', 'hav_vect', 'havent_vect', 'havenåõt_vect', 'havin_vect', 'head_vect', 'headache_vect', 'headin_vect', 'heads_vect', 'hear_vect', 'heard_vect', 'heart_vect', 'heavy_vect', 'hee_vect', 'height_vect', 'held_vect', 'helen_vect', 'hell_vect', 'hella_vect', 'hello_vect', 'help_vect', 'help08712400602450p_vect', 'helpline_vect', 'hence_vect', 'henry_vect', 'heri_vect', 'herlove_vect', 'hes_vect', 'hex_vect', 'hey_vect', 'hgsuite3422lands_vect', 'hi_vect', 'hide_vect', 'high_vect', 'hill_vect', 'hint_vect', 'hip_vect', 'history_vect', 'hit_vect', 'hiya_vect', 'hl_vect', 'hlp_vect', 'hmm_vect', 'hmmm_vect', 'hmv_vect', 'ho_vect', 'hockey_vect', 'hol_vect', 'hold_vect', 'holder_vect', 'holding_vect', 'holiday_vect', 'holla_vect', 'hols_vect', 'holy_vect', 'home_vect', 'honey_vect', 'hook_vect', 'hop_vect', 'hope_vect', 'hoped_vect', 'hopefully_vect', 'hoping_vect', 'horny_vect', 'horo_vect', 'horrible_vect', 'hospital_vect', 'hospitals_vect', 'hostel_vect', 'hot_vect', 'hotel_vect', 'hotels_vect', 'hour_vect', 'hours_vect', 'house_vect', 'housemaid_vect', 'however_vect', 'hows_vect', 'howz_vect', 'hr_vect', 'hrishi_vect', 'hrs_vect', 'http_vect', 'hubby_vect', 'hug_vect', 'huh_vect', 'hun_vect', 'hungry_vect', 'hunny_vect', 'hurry_vect', 'hurt_vect', 'hurting_vect', 'hurts_vect', 'husband_vect', 'hv_vect', 'hw_vect', 'hyde_vect', 'iam_vect', 'ibhltd_vect', 'ibiza_vect', 'ic_vect', 'ice_vect', 'id_vect', 'idea_vect', 'ideal_vect', 'ideas_vect', 'identifier_vect', 'idiot_vect', 'idk_vect', 'ignore_vect', 'ikea_vect', 'il_vect', 'ill_vect', 'im_vect', 'imagine_vect', 'imma_vect', 'immediately_vect', 'imp_vect', 'important_vect', 'impossible_vect', 'improved_vect', 'in2_vect', 'inc_vect', 'inches_vect', 'incident_vect', 'include_vect', 'including_vect', 'inclusive_vect', 'inconsiderate_vect', 'indeed_vect', 'india_vect', 'indian_vect', 'indians_vect', 'indicate_vect', 'infections_vect', 'infernal_vect', 'info_vect', 'inform_vect', 'information_vect', 'informed_vect', 'innings_vect', 'insha_vect', 'inside_vect', 'instantly_vect', 'instead_vect', 'instructions_vect', 'insurance_vect', 'intelligent_vect', 'interest_vect', 'interested_vect', 'interesting_vect', 'interflora_vect', 'internet_vect', 'intro_vect', 'invest_vect', 'invite_vect', 'invited_vect', 'inviting_vect', 'iouri_vect', 'ip4_vect', 'ipod_vect', 'irritating_vect', 'iscoming_vect', 'ish_vect', 'island_vect', 'islands_vect', 'isnt_vect', 'issue_vect', 'issues_vect', 'it_vect', 'italian_vect', 'its_vect', 'itz_vect', 'itåõs_vect', 'ive_vect', 'iz_vect', 'izzit_vect', 'iåõm_vect', 'jacket_vect', 'jamster_vect', 'jan_vect', 'january_vect', 'jason_vect', 
'java_vect', 'jay_vect', 'jazz_vect', 'jealous_vect', 'jeans_vect', 'jen_vect', 'jenny_vect', 'jess_vect', 'jesus_vect', 'jiayin_vect', 'jiu_vect', 'jo_vect', 'joanna_vect', 'job_vect', 'jogging_vect', 'john_vect', 'join_vect', 'joined_vect', 'joining_vect', 'joke_vect', 'jokes_vect', 'jokin_vect', 'joking_vect', 'jolly_vect', 'joy_vect', 'jsco_vect', 'jst_vect', 'juan_vect', 'juicy_vect', 'july_vect', 'june_vect', 'jus_vect', 'juz_vect', 'k52_vect', 'kadeem_vect', 'kaiez_vect', 'kallis_vect', 'kano_vect', 'kappa_vect', 'karaoke_vect', 'kate_vect', 'kay_vect', 'kb_vect', 'ke_vect', 'keep_vect', 'keeping_vect', 'keeps_vect', 'kent_vect', 'kept_vect', 'kerala_vect', 'key_vect', 'keys_vect', 'ki_vect', 'kick_vect', 'kid_vect', 'kidding_vect', 'kids_vect', 'kidz_vect', 'kind_vect', 'kinda_vect', 'kindly_vect', 'king_vect', 'kiss_vect', 'kisses_vect', 'kk_vect', 'knackered_vect', 'knew_vect', 'knock_vect', 'know_vect', 'knowing_vect', 'knows_vect', 'knw_vect', 'kz_vect', 'l8r_vect', 'la_vect', 'lab_vect', 'ladies_vect', 'lady_vect', 'lag_vect', 'laid_vect', 'land_vect', 'landline_vect', 'langport_vect', 'language_vect', 'laptop_vect', 'lar_vect', 'largest_vect', 'last_vect', 'late_vect', 'later_vect', 'latest_vect', 'latr_vect', 'laugh_vect', 'laughing_vect', 'law_vect', 'lazy_vect', 'ldn_vect', 'ldnw15h_vect', 'le_vect', 'lead_vect', 'learn_vect', 'least_vect', 'leave_vect', |
'leaves_vect', 'leaving_vect', 'lect_vect', 'lecture_vect', 'left_vect', 'legal_vect', 'legs_vect', 'leh_vect', 'lei_vect', 'lem_vect', 'less_vect', 'lesson_vect', 'lessons_vect', 'let_vect', 'lets_vect', 'letter_vect', 'letters_vect', 'liao_vect', 'library_vect', 'lick_vect', 'licks_vect', 'lido_vect', 'lie_vect', 'lies_vect', 'life_vect', 'lifetime_vect', 'lift_vect', 'light_vect', 'lik_vect', 'like_vect', 'liked_vect', 'likely_vect', 'likes_vect', 'lil_vect', 'line_vect', 'linerental_vect', 'lines_vect', 'link_vect', 'lion_vect', 'lionm_vect', 'lionp_vect', 'lions_vect', 'lip_vect', 'list_vect', 'listen_vect', 'listening_vect', 'literally_vect', 'little_vect', 'live_vect', 'liverpool_vect', 'living_vect', 'lk_vect', 'll_vect', 'lmao_vect', 'lo_vect', 'loads_vect', 'loan_vect', 'loans_vect', 'local_vect', 'locations_vect', 'lock_vect', 'log_vect', 'login_vect', 'logo_vect', 'logopic_vect', 'lol_vect', 'london_vect', 'lonely_vect', 'long_vect', 'longer_vect', 'look_vect', 'lookatme_vect', 'looked_vect', 'lookin_vect', 'looking_vect', 'looks_vect', 'lor_vect', 'lose_vect', 'losing_vect', 'loss_vect', 'lost_vect', 'lot_vect', 'lotr_vect', 'lots_vect', 'lou_vect', 'loud_vect', 'lounge_vect', 'lousy_vect', 'lovable_vect', 'love_vect', 'loved_vect', 'lovely_vect', 'loveme_vect', 'lover_vect', 'loverboy_vect', 'lovers_vect', 'loves_vect', 'loving_vect', 'low_vect', 'lower_vect', 'loyal_vect', 'loyalty_vect', 'ls15hb_vect', 'lt_vect', 'ltd_vect', 'luck_vect', 'lucky_vect', 'lucy_vect', 'lunch_vect', 'luv_vect', 'lux_vect', 'luxury_vect', 'lyf_vect', 'lyfu_vect', 'lyk_vect', 'm227xy_vect', 'm26_vect', 'm263uz_vect', 'm8s_vect', 'mac_vect', 'machan_vect', 'macho_vect', 'mad_vect', 'madam_vect', 'made_vect', 'mag_vect', 'maga_vect', 'magical_vect', 'mah_vect', 'mail_vect', 'mailbox_vect', 'main_vect', 'maintain_vect', 'major_vect', 'make_vect', 'makes_vect', 'makin_vect', 'making_vect', 'malaria_vect', 'male_vect', 'mall_vect', 'man_vect', 'managed_vect', 'management_vect', 'many_vect', 'map_vect', 'march_vect', 'mark_vect', 'market_vect', 'marriage_vect', 'married_vect', 'marry_vect', 'masters_vect', 'match_vect', 'matches_vect', 'mate_vect', 'mates_vect', 'matrix3_vect', 'matter_vect', 'max10mins_vect', 'maximize_vect', 'maxå_vect', 'may_vect', 'mayb_vect', 'maybe_vect', 'mca_vect', 'mcat_vect', 'meal_vect', 'mean_vect', 'meaning_vect', 'means_vect', 'meant_vect', 'meanwhile_vect', 'med_vect', 'medical_vect', 'medicine_vect', 'meds_vect', 'meet_vect', 'meetin_vect', 'meeting_vect', 'meh_vect', 'mei_vect', 'member_vect', 'members_vect', 'men_vect', 'menu_vect', 'merry_vect', 'mess_vect', 'message_vect', 'messaged_vect', 'messages_vect', 'messaging_vect', 'met_vect', 'mid_vect', 'middle_vect', 'midnight_vect', 'mids_vect', 'might_vect', 'miles_vect', 'milk_vect', 'min_vect', 'mind_vect', 'mine_vect', 'mini_vect', 'minimum_vect', 'minor_vect', 'mins_vect', 'minute_vect', 'minutes_vect', 'minuts_vect', 'miracle_vect', 'mis_vect', 'miserable_vect', 'miss_vect', 'missed_vect', 'missin_vect', 'missing_vect', 'mistake_vect', 'mistakes_vect', 'mite_vect', 'mm_vect', 'mmm_vect', 'mmmm_vect', 'mmmmmm_vect', 'mnths_vect', 'mo_vect', 'moan_vect', 'mob_vect', 'mobile_vect', 'mobiles_vect', 'mobilesdirect_vect', 'mobilesvary_vect', 'mobileupd8_vect', 'mobno_vect', 'moby_vect', 'mode_vect', 'model_vect', 'module_vect', 'modules_vect', 'moji_vect', 'mojibiola_vect', 'mokka_vect', 'mom_vect', 'moment_vect', 'moments_vect', 'moms_vect', 'mon_vect', 'monday_vect', 'money_vect', 'monkeys_vect', 'mono_vect', 
'month_vect', 'monthly_vect', 'months_vect', 'mood_vect', 'moon_vect', 'moral_vect', 'morn_vect', 'morning_vect', 'mostly_vect', 'mother_vect', 'motorola_vect', 'mouth_vect', 'move_vect', 'moved_vect', 'movie_vect', 'movies_vect', 'moving_vect', 'mp3_vect', 'mr_vect', 'mrng_vect', 'mrt_vect', 'msg_vect', 'msgs_vect', 'mths_vect', 'mu_vect', 'much_vect', 'mum_vect', 'mummy_vect', 'murder_vect', 'murdered_vect', 'murderer_vect', 'music_vect', 'must_vect', 'muz_vect', 'na_vect', 'nag_vect', 'nah_vect', 'naked_vect', 'name_vect', 'name1_vect', 'name2_vect', 'named_vect', 'names_vect', 'nan_vect', 'nap_vect', 'nasdaq_vect', 'nat_vect', 'national_vect', 'natural_vect', 'nature_vect', 'naughty_vect', 'nb_vect', 'nd_vect', 'ne_vect', 'near_vect', 'nearly_vect', 'necessarily_vect', 'necklace_vect', 'ned_vect', 'need_vect', 'needed_vect', 'needs_vect', 'neither_vect', 'net_vect', 'netcollex_vect', 'network_vect', 'networks_vect', 'neva_vect', 'never_vect', 'new_vect', 'newest_vect', 'news_vect', 'next_vect', 'ni8_vect', 'nice_vect', 'nigeria_vect', 'night_vect', 'nights_vect', 'nimya_vect', 'nite_vect', 'no1_vect', 'nobody_vect', 'noe_vect', 'nokia_vect', 'nokias_vect', 'noline_vect', 'none_vect', 'noon_vect', 'nope_vect', 'norm_vect', 'norm150ptone_vect', 'normal_vect', 'normally_vect', 'northampton_vect', 'note_vect', 'nothin_vect', 'nothing_vect', 'notice_vect', 'noun_vect', 'nowadays_vect', 'nt_vect', 'ntt_vect', 'ntwk_vect', 'num_vect', 'number_vect', 'numbers_vect', 'nuther_vect', 'nvm_vect', 'nw_vect', 'nxt_vect', 'nyc_vect', 'nydc_vect', 'nyt_vect', 'o2_vect', 'obviously_vect', 'odi_vect', 'offer_vect', 'offers_vect', 'office_vect', 'official_vect', 'officially_vect', 'ofice_vect', 'often_vect', 'oh_vect', 'oi_vect', 'oic_vect', 'ok_vect', 'okay_vect', 'okey_vect', 'okie_vect', 'ola_vect', 'old_vect', 'omg_vect', 'omw_vect', 'one_vect', 'ones_vect', 'oni_vect', 'online_vect', 'onto_vect', 'onwards_vect', 'oooh_vect', 'oops_vect', 'open_vect', 'opening_vect', 'operator_vect', 'opinion_vect', 'opportunity_vect', 'opt_vect', 'option_vect', 'optout_vect', 'or2stoptxt_vect', 'orange_vect', 'oranges_vect', 'orchard_vect', 'order_vect', 'ordered_vect', 'oredi_vect', 'original_vect', 'oru_vect', 'os_vect', 'oso_vect', 'others_vect', 'otherwise_vect', 'otside_vect', 'outside_vect', 'outstanding_vect', 'outta_vect', 'ovulation_vect', 'oz_vect', 'pa_vect', 'pack_vect', 'package_vect', 'page_vect', 'pages_vect', 'paid_vect', 'pain_vect', 'painful_vect', 'painting_vect', 'panic_vect', 'paper_vect', 'papers_vect', 'paperwork_vect', 'parco_vect', 'parent_vect', 'parents_vect', 'paris_vect', 'park_vect', 'parked_vect', 'parking_vect', 'part_vect', 'partner_vect', 'partnership_vect', 'party_vect', 'pass_vect', 'passed_vect', 'password_vect', 'past_vect', 'pattern_vect', 'patty_vect', 'pay_vect', 'paying_vect', 'payment_vect', 'payoh_vect', 'pc_vect', 'peace_vect', 'pence_vect', 'people_vect', 'per_vect', 'perfect_vect', 'period_vect', 'person_vect', 'personal_vect', 'pete_vect', 'petey_vect', 'pg_vect', 'philosophy_vect', 'phoenix_vect', 'phone_vect', 'phones_vect', 'photo_vect', 'photos_vect', 'pic_vect', 'pick_vect', 'picked_vect', 'picking_vect', 'pics_vect', 'picsfree1_vect', 'picture_vect', 'pictures_vect', 'pie_vect', 'pieces_vect', 'pig_vect', 'pilates_vect', 'pin_vect', 'pink_vect', 'piss_vect', 'pissed_vect', 'pix_vect', 'pizza_vect', 'place_vect', 'places_vect', 'plan_vect', 'planned_vect', 'planning_vect', 'plans_vect', 'play_vect', 'played_vect', 'player_vect', 'players_vect', 'playing_vect', 
'please_vect', 'pleased_vect', 'pleasure_vect', 'plenty_vect', 'pls_vect', 'plus_vect', 'plz_vect', 'pm_vect', 'po_vect', 'pobox_vect', 'pobox334_vect', 'pobox36504w45wq_vect', 'pobox45w2tg150p_vect', 'pobox84_vect', 'pod_vect', 'point_vect', 'points_vect', 'poker_vect', 'pole_vect', 'police_vect', 'politicians_vect', 'poly_vect', 'polyphonic_vect', 'polys_vect', 'pongal_vect', 'poor_vect', 'pop_vect', 'popped_vect', 'porn_vect', 'possession_vect', 'possible_vect', 'post_vect', 'postcard_vect', 'postcode_vect', 'posted_vect', 'posts_vect', 'potential_vect', 'potter_vect', 'pound_vect', 'pounds_vect', 'pouts_vect', 'power_vect', 'ppl_vect', 'pple_vect', 'ppm_vect', 'prabha_vect', 'practice_vect', 'practicing_vect', 'pray_vect', 'prefer_vect', 'premier_vect', 'prepare_vect', 'prescription_vect', 'present_vect', 'press_vect', 'pretty_vect', 'prey_vect', 'price_vect', 'prince_vect', 'princess_vect', 'print_vect', 'privacy_vect', 'private_vect', 'prize_vect', 'prob_vect', 'probably_vect', 'problem_vect', 'problems_vect', 'process_vect', 'processed_vect', 'prof_vect', 'profit_vect', 'program_vect', 'project_vect', 'prolly_vect', 'promise_vect', 'promises_vect', 'promo_vect', 'proof_vect', 'properly_vect', 'prospects_vect', 'provided_vect', 'ps_vect', 'ptbo_vect', 'pub_vect', 'pull_vect', 'purchase_vect', 'purity_vect', 'purpose_vect', 'push_vect', 'pushes_vect', 'pussy_vect', 'put_vect', 'puttin_vect', 'putting_vect', 'qatar_vect', 'quality_vect', |
'queen_vect', 'ques_vect', 'question_vect', 'questions_vect', 'quick_vect', 'quickly_vect', 'quiet_vect', 'quit_vect', 'quite_vect', 'quiz_vect', 'quote_vect', 'quoting_vect', 'racing_vect', 'radio_vect', 'railway_vect', 'rain_vect', 'raining_vect', 'raise_vect', 'rakhesh_vect', 'rally_vect', 'ran_vect', 'random_vect', 'randomly_vect', 'randy_vect', 'rang_vect', 'range_vect', 'ranjith_vect', 'rate_vect', 'rates_vect', 'rather_vect', 'rays_vect', 'rcvd_vect', 'rd_vect', 're_vect', 'reach_vect', 'reached_vect', 'reaching_vect', 'reaction_vect', 'read_vect', 'readers_vect', 'reading_vect', 'ready_vect', 'real_vect', 'realise_vect', 'reality_vect', 'realized_vect', 'really_vect', 'realy_vect', 'reason_vect', 'reasonable_vect', 'reasons_vect', 'reboot_vect', 'recd_vect', 'receipt_vect', 'receive_vect', 'received_vect', 'receiving_vect', 'recent_vect', 'recently_vect', 'recession_vect', 'record_vect', 'records_vect', 'recovery_vect', 'red_vect', 'ref_vect', 'reference_vect', 'reg_vect', 'regards_vect', 'register_vect', 'registered_vect', 'regret_vect', 'regular_vect', 'relation_vect', 'relax_vect', 'released_vect', 'rem_vect', 'remain_vect', 'remains_vect', 'remember_vect', 'remembered_vect', 'remembr_vect', 'remind_vect', 'reminder_vect', 'remove_vect', 'rent_vect', 'rental_vect', 'rentl_vect', 'repair_vect', 'repeat_vect', 'replied_vect', 'reply_vect', 'replying_vect', 'report_vect', 'representative_vect', 'request_vect', 'requests_vect', 'research_vect', 'resend_vect', 'respect_vect', 'respond_vect', 'responding_vect', 'response_vect', 'responsibility_vect', 'rest_vect', 'restaurant_vect', 'result_vect', 'results_vect', 'retrieve_vect', 'return_vect', 'returned_vect', 'returns_vect', 'reveal_vect', 'revealed_vect', 'review_vect', 'revision_vect', 'reward_vect', 'rhythm_vect', 'rice_vect', 'rich_vect', 'ride_vect', 'right_vect', 'rights_vect', 'ring_vect', 'ringtone_vect', 'ringtones_vect', 'rite_vect', 'river_vect', 'road_vect', 'roads_vect', 'roast_vect', 'rock_vect', 'rocks_vect', 'rofl_vect', 'roger_vect', 'role_vect', 'ron_vect', 'room_vect', 'roommate_vect', 'roommates_vect', 'rooms_vect', 'rose_vect', 'round_vect', 'row_vect', 'roww1j6hl_vect', 'roww1jhl_vect', 'royal_vect', 'rply_vect', 'rs_vect', 'rstm_vect', 'ru_vect', 'rub_vect', 'rude_vect', 'rule_vect', 'run_vect', 'running_vect', 'runs_vect', 'rush_vect', 'sacrifice_vect', 'sad_vect', 'sae_vect', 'safe_vect', 'said_vect', 'salam_vect', 'salary_vect', 'sale_vect', 'salon_vect', 'sam_vect', 'santa_vect', 'sar_vect', 'sarasota_vect', 'sarcastic_vect', 'sary_vect', 'sat_vect', 'sathya_vect', 'saturday_vect', 'savamob_vect', 'save_vect', 'saved_vect', 'saw_vect', 'say_vect', 'saying_vect', 'says_vect', 'scared_vect', 'scary_vect', 'sch_vect', 'schedule_vect', 'school_vect', 'schools_vect', 'science_vect', 'scold_vect', 'score_vect', 'scores_vect', 'scotland_vect', 'scream_vect', 'screaming_vect', 'scrounge_vect', 'se_vect', 'sea_vect', 'search_vect', 'searching_vect', 'season_vect', 'seat_vect', 'sec_vect', 'second_vect', 'seconds_vect', 'secret_vect', 'secretary_vect', 'secs_vect', 'sed_vect', 'see_vect', 'seeing_vect', 'seem_vect', 'seemed_vect', 'seems_vect', 'seen_vect', 'selected_vect', 'selection_vect', 'self_vect', 'sell_vect', 'selling_vect', 'sells_vect', 'sem_vect', 'semester_vect', 'sen_vect', 'send_vect', 'sender_vect', 'sending_vect', 'sense_vect', 'sent_vect', 'sentence_vect', 'sept_vect', 'series_vect', 'serious_vect', 'seriously_vect', 'service_vect', 'services_vect', 'serving_vect', 'set_vect', 'setting_vect', 
'settings_vect', 'settle_vect', 'settled_vect', 'seven_vect', 'several_vect', 'sex_vect', 'sexy_vect', 'sh_vect', 'sha_vect', 'shall_vect', 'share_vect', 'shd_vect', 'sheets_vect', 'shes_vect', 'shesil_vect', 'shining_vect', 'ship_vect', 'shipping_vect', 'shirt_vect', 'shirts_vect', 'shit_vect', 'shld_vect', 'shocking_vect', 'shoot_vect', 'shop_vect', 'shoppin_vect', 'shopping_vect', 'short_vect', 'shorter_vect', 'shortly_vect', 'shot_vect', 'shoving_vect', 'show_vect', 'shower_vect', 'showing_vect', 'shows_vect', 'shu_vect', 'shuhui_vect', 'shy_vect', 'si_vect', 'sick_vect', 'side_vect', 'sighs_vect', 'sight_vect', 'sign_vect', 'signing_vect', 'silence_vect', 'silent_vect', 'silver_vect', 'sim_vect', 'simple_vect', 'simply_vect', 'since_vect', 'sing_vect', 'singing_vect', 'single_vect', 'singles_vect', 'sipix_vect', 'sir_vect', 'sis_vect', 'sister_vect', 'sit_vect', 'site_vect', 'sitting_vect', 'situation_vect', 'siva_vect', 'six_vect', 'size_vect', 'sk3_vect', 'sk38xh_vect', 'skilgme_vect', 'skip_vect', 'sky_vect', 'skype_vect', 'skyped_vect', 'slap_vect', 'slave_vect', 'sleep_vect', 'sleepin_vect', 'sleeping_vect', 'sleepy_vect', 'slept_vect', 'slice_vect', 'slide_vect', 'slightly_vect', 'slip_vect', 'slippers_vect', 'slo_vect', 'slots_vect', 'slow_vect', 'slowly_vect', 'small_vect', 'smashed_vect', 'smile_vect', 'smiles_vect', 'smiling_vect', 'smoke_vect', 'smoking_vect', 'sms_vect', 'smth_vect', 'sn_vect', 'snake_vect', 'snow_vect', 'social_vect', 'sofa_vect', 'soft_vect', 'software_vect', 'sol_vect', 'some1_vect', 'somebody_vect', 'someone_vect', 'somethin_vect', 'something_vect', 'sometimes_vect', 'somewhere_vect', 'song_vect', 'songs_vect', 'sony_vect', 'sonyericsson_vect', 'soon_vect', 'sooner_vect', 'sore_vect', 'sorry_vect', 'sort_vect', 'sorting_vect', 'sound_vect', 'sounds_vect', 'south_vect', 'sp_vect', 'space_vect', 'spanish_vect', 'speak_vect', 'speaking_vect', 'special_vect', 'specialcall_vect', 'specially_vect', 'speed_vect', 'spend_vect', 'spending_vect', 'spent_vect', 'spk_vect', 'spoke_vect', 'spoken_vect', 'spook_vect', 'sport_vect', 'sports_vect', 'spree_vect', 'spring_vect', 'sptv_vect', 'sry_vect', 'st_vect', 'staff_vect', 'stamps_vect', 'stand_vect', 'standard_vect', 'standing_vect', 'star_vect', 'staring_vect', 'start_vect', 'started_vect', 'starting_vect', 'starts_vect', 'starwars3_vect', 'statement_vect', 'station_vect', 'stay_vect', 'stayed_vect', 'staying_vect', 'std_vect', 'steam_vect', 'step_vect', 'steve_vect', 'stick_vect', 'sticky_vect', 'still_vect', 'stock_vect', 'stockport_vect', 'stomach_vect', 'stomps_vect', 'stones_vect', 'stop_vect', 'stopped_vect', 'stops_vect', 'store_vect', 'stores_vect', 'story_vect', 'str_vect', 'straight_vect', 'stranger_vect', 'street_vect', 'stress_vect', 'strike_vect', 'strong_vect', 'strongbuy_vect', 'stuck_vect', 'student_vect', 'study_vect', 'studying_vect', 'stuff_vect', 'stupid_vect', 'style_vect', 'stylish_vect', 'sub_vect', 'subpoly_vect', 'subs_vect', 'subscribe6gbpmnth_vect', 'subscribed_vect', 'subscriber_vect', 'subscription_vect', 'success_vect', 'successful_vect', 'successfully_vect', 'sucks_vect', 'sue_vect', 'sufficient_vect', 'suggest_vect', 'suite_vect', 'suits_vect', 'sum1_vect', 'summer_vect', 'sun_vect', 'sunday_vect', 'sunlight_vect', 'sunny_vect', 'sunshine_vect', 'suntec_vect', 'sup_vect', 'super_vect', 'superb_vect', 'superior_vect', 'supervisor_vect', 'supply_vect', 'support_vect', 'suppose_vect', 'supposed_vect', 'suprman_vect', 'sura_vect', 'sure_vect', 'surely_vect', 'surfing_vect', 
'surprise_vect', 'surprised_vect', 'survey_vect', 'sux_vect', 'suzy_vect', 'sw7_vect', 'sw73ss_vect', 'sweet_vect', 'swing_vect', 'system_vect', 'ta_vect', 'tablets_vect', 'tahan_vect', 'take_vect', 'taken_vect', 'takes_vect', 'takin_vect', 'taking_vect', 'talent_vect', 'talk_vect', 'talking_vect', 'tampa_vect', 'tape_vect', 'tariffs_vect', 'tat_vect', 'taunton_vect', 'taylor_vect', 'tb_vect', 'tc_vect', 'tcrw1_vect', 'tcs_vect', 'tea_vect', 'teach_vect', 'teacher_vect', 'teaches_vect', 'team_vect', 'tear_vect', 'tease_vect', 'teasing_vect', 'tech_vect', 'technical_vect', 'tee_vect', 'teeth_vect', 'tel_vect', 'telephone_vect', 'tell_vect', 'telling_vect', 'tells_vect', 'telugu_vect', 'temple_vect', 'ten_vect', 'tenants_vect', 'tenerife_vect', 'tension_vect', 'term_vect', 'terms_vect', 'terrible_vect', 'test_vect', 'testing_vect', 'text_vect', 'texted_vect', 'texting_vect', 'textoperator_vect', 'texts_vect', 'th_vect', 'thangam_vect', 'thank_vect', 'thanks_vect', 'thanksgiving_vect', 'thanx_vect', 'that_vect', 'thats_vect', 'thatåõs_vect', 'the_vect', 'theatre_vect', 'themob_vect', 'theory_vect', 'thesis_vect', 'thgt_vect', 'thing_vect', 'things_vect', 'think_vect', 'thinkin_vect', 'thinking_vect', 'thinks_vect', 'thk_vect', 'thnk_vect', 'tho_vect', 'though_vect', 'thought_vect', 'three_vect', 'throat_vect', 'throw_vect', 'thru_vect', 'tht_vect', 'thts_vect', 'thurs_vect', 'thursday_vect', 'tick_vect', 'ticket_vect', 'tickets_vect', 'tight_vect', 'tihs_vect', 'til_vect', 'till_vect', 'time_vect', 'times_vect', 'timing_vect', 'tired_vect', 'tirunelvali_vect', 'tirupur_vect', 'tissco_vect', 'tkts_vect', 'tm_vect', 'tming_vect', 'tmobile_vect', 'tmr_vect', 'tncs_vect', 'toa_ |
vect', 'toclaim_vect', 'today_vect', 'todays_vect', 'tog_vect', 'together_vect', 'tok_vect', 'told_vect', 'tomarrow_vect', 'tomo_vect', 'tomorrow_vect', 'tone_vect', 'tones_vect', 'tones2youcouk_vect', 'tonight_vect', 'tonite_vect', 'took_vect', 'tool_vect', 'tooo_vect', 'toot_vect', 'top_vect', 'topic_vect', 'torch_vect', 'toshiba_vect', 'tot_vect', 'total_vect', 'totally_vect', 'touch_vect', 'tough_vect', 'tour_vect', 'towards_vect', 'town_vect', 'track_vect', 'trade_vect', 'traffic_vect', 'train_vect', 'training_vect', 'transaction_vect', 'transfer_vect', 'transport_vect', 'travel_vect', 'treat_vect', 'treated_vect', 'tried_vect', 'trip_vect', 'trips_vect', 'trouble_vect', 'true_vect', 'truffles_vect', 'truly_vect', 'trust_vect', 'truth_vect', 'try_vect', 'trying_vect', 'ts_vect', 'tscs_vect', 'tscs087147403231winawk_vect', 'tt_vect', 'ttyl_vect', 'tues_vect', 'tuesday_vect', 'tuition_vect', 'turn_vect', 'turning_vect', 'turns_vect', 'tv_vect', 'twelve_vect', 'twice_vect', 'two_vect', 'txt_vect', 'txtauction_vect', 'txtin_vect', 'txting_vect', 'txtno_vect', 'txts_vect', 'txtstop_vect', 'tyler_vect', 'type_vect', 'tyrone_vect', 'u4_vect', 'ubi_vect', 'ufind_vect', 'ugh_vect', 'uh_vect', 'uk_vect', 'uks_vect', 'ultimatum_vect', 'umma_vect', 'unable_vect', 'uncle_vect', 'understand_vect', 'understanding_vect', 'understood_vect', 'underwear_vect', 'unemployed_vect', 'uni_vect', 'unique_vect', 'university_vect', 'unless_vect', 'unlimited_vect', 'unnecessarily_vect', 'unredeemed_vect', 'unsold_vect', 'unsub_vect', 'unsubscribe_vect', 'upd8_vect', 'update_vect', 'updatenow_vect', 'upgrade_vect', 'upload_vect', 'upset_vect', 'upstairs_vect', 'ur_vect', 'ure_vect', 'urgent_vect', 'urgnt_vect', 'url_vect', 'urn_vect', 'urself_vect', 'us_vect', 'usb_vect', 'use_vect', 'used_vect', 'user_vect', 'usf_vect', 'using_vect', 'usual_vect', 'usually_vect', 'vale_vect', 'valentine_vect', 'valentines_vect', 'valid_vect', 'valid12hrs_vect', 'valuable_vect', 'value_vect', 'valued_vect', 'vary_vect', 've_vect', 'vegas_vect', 'verify_vect', 'version_vect', 'via_vect', 'vid_vect', 'video_vect', 'videochat_vect', 'videophones_vect', 'vijay_vect', 'vikky_vect', 'village_vect', 'violated_vect', 'violence_vect', 'vip_vect', 'virgin_vect', 'visit_vect', 'vivek_vect', 'vl_vect', 'voda_vect', 'vodafone_vect', 'vodka_vect', 'voice_vect', 'voicemail_vect', 'vomit_vect', 'vote_vect', 'voucher_vect', 'vouchers_vect', 'vry_vect', 'vth_vect', 'w45wq_vect', 'wa_vect', 'wah_vect', 'wait_vect', 'waited_vect', 'waitin_vect', 'waiting_vect', 'wake_vect', 'waking_vect', 'wales_vect', 'walk_vect', 'walked_vect', 'walking_vect', 'walmart_vect', 'wan_vect', 'wana_vect', 'want_vect', 'wanted_vect', 'wanting_vect', 'wants_vect', 'wap_vect', 'warm_vect', 'warner_vect', 'waste_vect', 'wasted_vect', 'wat_vect', 'watch_vect', 'watching_vect', 'water_vect', 'wats_vect', 'way_vect', 'wc1n3xx_vect', 'we_vect', 'weak_vect', 'wear_vect', 'wearing_vect', 'weather_vect', 'web_vect', 'website_vect', 'wed_vect', 'wedding_vect', 'wednesday_vect', 'wee_vect', 'weed_vect', 'week_vect', 'weekend_vect', 'weekends_vect', 'weekly_vect', 'weeks_vect', 'weigh_vect', 'weight_vect', 'weird_vect', 'welcome_vect', 'well_vect', 'welp_vect', 'wen_vect', 'went_vect', 'west_vect', 'wet_vect', 'what_vect', 'whatever_vect', 'whats_vect', 'whenever_vect', 'whenevr_vect', 'wherever_vect', 'whether_vect', 'white_vect', 'whn_vect', 'whole_vect', 'whos_vect', 'whose_vect', 'wid_vect', 'widelivecomindex_vect', 'wif_vect', 'wife_vect', 'wil_vect', 'willing_vect', 
'win_vect', 'wind_vect', 'wine_vect', 'winner_vect', 'winning_vect', 'wins_vect', 'wipro_vect', 'wisdom_vect', 'wise_vect', 'wish_vect', 'wishes_vect', 'wishing_vect', 'wit_vect', 'within_vect', 'without_vect', 'wiv_vect', 'wk_vect', 'wkend_vect', 'wkg_vect', 'wkly_vect', 'wks_vect', 'wld_vect', 'wml_vect', 'wn_vect', 'wnt_vect', 'wo_vect', 'woke_vect', 'woken_vect', 'woman_vect', 'women_vect', 'wonder_vect', 'wonderful_vect', 'wondering_vect', 'wont_vect', 'woot_vect', 'word_vect', 'words_vect', 'work_vect', 'workin_vect', 'working_vect', 'works_vect', 'world_vect', 'worried_vect', 'worries_vect', 'worry_vect', 'worse_vect', 'worst_vect', 'worth_vect', 'wot_vect', 'would_vect', 'wow_vect', 'write_vect', 'wrong_vect', 'wtf_vect', 'wud_vect', 'wuld_vect', 'wun_vect', 'www4tcbiz_vect', 'wwwcomuknet_vect', 'wwwetlpcoukexpressoffer_vect', 'wwwgetzedcouk_vect', 'wwwldewcom_vect', 'wwwldewcom1win150ppmx3age16_vect', 'wwwmovietriviatv_vect', 'wwwringtonescouk_vect', 'wwwsmsconet_vect', 'wwwtxttowincouk_vect', 'wwwurawinnercom_vect', 'wylie_vect', 'xchat_vect', 'xmas_vect', 'xuhui_vect', 'xx_vect', 'xxx_vect', 'xxxx_vect', 'xxxxx_vect', 'xy_vect', 'ya_vect', 'yahoo_vect', 'yan_vect', 'yar_vect', 'yay_vect', 'yck_vect', 'yeah_vect', 'year_vect', 'years_vect', 'yelling_vect', 'yellow_vect', 'yep_vect', 'yes_vect', 'yest_vect', 'yesterday_vect', 'yet_vect', 'yetunde_vect', 'yijue_vect', 'ym_vect', 'yo_vect', 'yoga_vect', 'yogasana_vect', 'yor_vect', 'you_vect', 'yr_vect', 'yrs_vect', 'yummy_vect', 'yun_vect', 'yuo_vect', 'yup_vect', 'zed_vect', 'zindgi_vect', 'ìï_vect', 'ûò_vect']}, 'training': {'algo': 'Logistic Regression', 'model_file': 'AI0110_1.sav'}}
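# Instantiate the classification deployer with the configuration assembled above and run it to generate the deployment package.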
deployer = get_deployer('classification',params=config)
deployer.run()<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import shutil
import subprocess
from os.path import expanduser
import platform
deploymentfolder = os.path.join(os.path.dirname(os.path.abspath(__file__)),'HCLT','AION','target')
modelname='AION_12'
version='1'
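# createDockerImage packages a trained model for serving: it stages the deployment
# folder, the model-service script, the start-up shell script and the AIX/Drift wheels
# into a temporary build context, generates a Dockerfile and requirements.txt, builds
# the image and exports it as a .tar archive under <deploymentfolder>/docker_image.
# Text models additionally bundle the local site-packages and the pre-trained
# text-processing models.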
def createDockerImage(deploymentfolder,modelname,version,learner_type,textdata):
modelPath = os.path.join(deploymentfolder)
filename = os.path.join(deploymentfolder,'docker_image')
modelservice = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','run_modelService.py')
shellscript = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','start_modelservice.sh')
aix = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','AIX-0.1-py3-none-any.whl')
drift = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','Drift-0.1-py3-none-any.whl')
sitepackage = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','..','..','..','extensions','site-packages')
model_dockerSetup = os.path.join(os.path.dirname(os.path.abspath(__file__)),'dockersetup','docker_'+modelname + '_' + version)
docker_setup = os.path.join(model_dockerSetup,modelname + '_' + version)
model_sitepackage = os.path.join(model_dockerSetup,'site-packages')
model_dockerSetupservicefile = os.path.join(model_dockerSetup,'run_modelService.py')
model_dockershellscript = os.path.join(model_dockerSetup,'start_modelservice.sh')
model_aix = os.path.join(model_dockerSetup,'AIX-0.1-py3-none-any.whl')
model_drift = os.path.join(model_dockerSetup,'Drift-0.1-py3-none-any.whl')
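# Stage the Docker build context: copy the deployment folder, the model-service script,
# the shell launcher and the wheel files into a per-model 'dockersetup' directory
# (plus site-packages and pre-trained text models for text use cases).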
try:
os.mkdir(model_dockerSetup)
except Exception as e:
print("Error in creating Setup directpry "+str(e))
pass
shutil.copytree(modelPath, docker_setup)
if textdata:
shutil.copytree(sitepackage, model_sitepackage)
modelpretrainpath=os.path.join(model_dockerSetup,'HCLT','AION','PreTrainedModels','TextProcessing')
'''
try:
os.makedirs(modelpretrainpath, exist_ok=True)
except Exception as e:
print("Error in creating Setup directpry "+str(e))
pass
'''
home = expanduser("~")
if platform.system() == 'Windows':
hostpretrainpath = os.path.join(home,'AppData','Local','HCLT','AION','PreTrainedModels','TextProcessing')
else:
hostpretrainpath = os.path.join(home,'HCLT','AION','PreTrainedModels','TextProcessing')
shutil.copytree(hostpretrainpath, modelpretrainpath)
shutil.copyfile(modelservice, model_dockerSetupservicefile)
shutil.copyfile(shellscript, model_dockershellscript)
shutil.copyfile(aix, model_aix)
shutil.copyfile(drift,model_drift)
try:
os.mkdir(filename)
except:
pass
requirementfilename = os.path.join(model_dockerSetup,'requirements.txt')
installfilename = os.path.join(model_dockerSetup,'install.py')
dockerfile = os.path.join(model_dockerSetup,'Dockerfile')
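# Build the Dockerfile content as a string: the image is based on python:3.8-slim-buster,
# copies the model package, service scripts and wheels, pins the Python dependencies and
# launches start_modelservice.sh as the entrypoint. Text models additionally copy the
# HCLT pre-trained models and the bundled site-packages into the image.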
dockerdata='FROM python:3.8-slim-buster'
dockerdata+='\\n'
if textdata:
dockerdata+='WORKDIR /root'
dockerdata+='\\n'
dockerdata+='COPY HCLT HCLT'
dockerdata+='\\n'
dockerdata+='WORKDIR /app'
dockerdata+='\\n'
dockerdata+='COPY requirements.txt requirements.txt'
dockerdata+='\\n'
dockerdata+='COPY '+modelname+'_'+version+' '+modelname+'_'+version
dockerdata+='\\n'
if textdata:
dockerdata+='COPY site-packages site-packages'
dockerdata+='\\n'
dockerdata+='COPY install.py install.py'
dockerdata+='\\n'
dockerdata+='COPY run_modelService.py run_modelService.py'
dockerdata+='\\n'
dockerdata+='COPY AIX-0.1-py3-none-any.whl AIX-0.1-py3-none-any.whl'
dockerdata+='\\n'
dockerdata+='COPY Drift-0.1-py3-none-any.whl Drift-0.1-py3-none-any.whl'
dockerdata+='\\n'
dockerdata+='COPY start_modelservice.sh start_modelservice.sh'
dockerdata+='\\n'
if textdata:
dockerdata+='''RUN apt-get update \\
&& apt-get install -y build-essential manpages-dev \\
&& python -m pip install --no-cache-dir --upgrade pip \\
&& python -m pip install --no-cache-dir pandas==1.2.4 \\
&& python -m pip install --no-cache-dir numpy==1.19.5 \\
&& python -m pip install --no-cache-dir joblib==1.0.1 \\
&& python -m pip install --no-cache-dir Cython==0.29.23 \\
&& mv site-packages/* /usr/local/lib/python3.8/site-packages \\
&& python -m pip install --no-cache-dir scipy==1.6.3 \\
&& python -m pip install --no-cache-dir AIX-0.1-py3-none-any.whl \\
&& python -m pip install --no-cache-dir Drift-0.1-py3-none-any.whl \\
&& python -m pip install --no-cache-dir scikit-learn==0.24.2 \\
&& python -m pip install --no-cache-dir spacy==2.2.3 \\
&& python -m pip install --no-cache-dir nltk==3.6.2 \\
&& python -m pip install --no-cache-dir textblob==0.15.3 \\
&& python -m pip install --no-cache-dir gensim==3.8.3 \\
&& python -m pip install --no-cache-dir demoji==1.1.0 \\
&& python -m pip install --no-cache-dir lxml==4.6.3 \\
&& python -m pip install --no-cache-dir Beautifulsoup4==4.9.3 \\
&& python -m pip install --no-cache-dir Unidecode==1.2.0 \\
&& python -m pip install --no-cache-dir pyspellchecker==0.6.2 \\
&& python -m pip install --no-cache-dir pycontractions==2.0.1 \\
&& python -m pip install --no-cache-dir tensorflow==2.4.1 \\
&& python -m pip install --no-cache-dir nltk==3.6.2 \\
&& python -m pip install --no-cache-dir -r requirements.txt \\
&& python install.py \\
&& chmod +x start_modelservice.sh
ENTRYPOINT ["./start_modelservice.sh"]
'''
else:
dockerdata+='''RUN apt-get update \\
&& apt-get install -y build-essential manpages-dev \\
&& python -m pip install --no-cache-dir --upgrade pip \\
&& python -m pip install --no-cache-dir pandas==1.2.4 \\
&& python -m pip install --no-cache-dir numpy==1.19.5 \\
&& python -m pip install --no-cache-dir joblib==1.0.1 \\
&& python -m pip install --no-cache-dir Cython==0.29.23 \\
&& python -m pip install --no-cache-dir scipy==1.6.3 \\
&& python -m pip install --no-cache-dir AIX-0.1-py3-none-any.whl \\
&& python -m pip install --no-cache-dir Drift-0.1-py3-none-any.whl \\
&& python -m pip install --no-cache-dir scikit-learn==0.24.2 \\
&& python -m pip install --no-cache-dir -r requirements.txt \\
&& chmod +x start_modelservice.sh
ENTRYPOINT ["./start_modelservice.sh"]
'''
f = open(dockerfile, "w")
f.write(str(dockerdata))
f.close()
requirementdata=''
requirementdata+='word2number==1.1'
if learner_type == 'DL':
requirementdata+='\\n'
requirementdata+='tensorflow==2.5.0'
f = open(requirementfilename, "w")
f.write(str(requirementdata))
f.close()
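# For text models, generate install.py, which downloads the required NLTK corpora
# (punkt, wordnet, stopwords, averaged_perceptron_tagger) at image build time.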
if textdata:
installfile='''
import nltk
import ssl
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
pass
else:
ssl._create_default_https_context = _create_unverified_https_context
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')'''
f = open(installfilename, "w")
f.write(str(installfile))
f.close()
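# Pull the base image, build the model image, export it as a tar archive into the
# docker_image folder and remove the temporary build context.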
try:
command = 'docker pull python:3.8-slim-buster'
os.system(command)
#subprocess.check_call(["chmod", "+x", "start_modelservice.sh"], cwd=model_dockerSetup)
subprocess.check_call(["docker", "build", "-t",modelname.lower()+":"+version,"."], cwd=model_dockerSetup)
subprocess.check_call(["docker", "save", "-o",modelname.lower()+"_"+version+".tar",modelname.lower()+":"+version], cwd=model_dockerSetup)
dockerfilepath = os.path.join(model_dockerSetup,modelname.lower()+"_"+version+".tar")
shutil.copyfile(dockerfilepath, os.path.join(filename,modelname.lower()+"_"+version+".tar"))
shutil.rmtree(model_dockerSetup)
return 'Success','SUCCESSFULLY'
except Exception as e:
print("Error: "+str(e))
shutil.rmtree(model_dockerSetup)
return 'Error',str(e)
#createDockerImage(deploymentfolder,modelname,version,learner_type,textdata)<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import subprocess
import os
import glob
import sys
import python_minifier
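# encrypt_files protects every .py file in the given folder: each script is minified
# with pyminify, obfuscated with pyarmor and gzip-compressed with pyminifier, and the
# results are written under <path>/SecuredScripts/<script name>/dist.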
def encrypt_files(path):
cwd = os.getcwd()
secure_path = os.path.join(path,'SecuredScripts')
try:
if not os.path.exists(secure_path):
os.mkdir(secure_path)
files = [f for f in glob.glob(path + "/*.py")]
for file in files:
#encrypted_file_details[0] = file
#file = files[0]
#print(file)
#filename_w_dir = os.path.splitext(file)
filename_w_ext = os.path.basename(file)
filename, file_extension = os.path.splitext(filename_w_ext)
file_folder_path = os.path.join(secure_path,filename)
#print(file_folder_path)
if not os.path.exists(file_folder_path):
os.mkdir(file_folder_path)
# Minify python source code
minify_file = os.path.join(file_folder_path,filename+'_minify.py')
pythonfolder,_ = os.path.split(sys.executable)
pyminify_script = os.path.join(pythonfolder,'Scripts','pyminify.exe')
minify_command = "\\""+sys.executable+"\\" \\""+pyminify_script+ "\\" \\"" + file + "\\" > \\"" + minify_file+"\\""
subprocess.call(minify_command, shell=True)
# Change directory to folder path
os.chdir(file_folder_path)
# Obfuscate minified file
pyarmor_script = os.path.join(pythonfolder,'Scripts','pyarmor.exe')
obfusc_commmand = "\\""+sys.executable+"\\" \\""+pyarmor_script+"\\" obfuscate \\"" + minify_file+"\\""
#print(obfusc_commmand)
subprocess.call(obfusc_commmand, shell=True)
# Change directory to dist path
obfusc_file = os.path.join(file_folder_path,'dist',filename+'_minify.py')
#print(obfusc_file)
chdirpath = os.path.join(file_folder_path,'dist')
os.chdir(chdirpath)
# Compress obfuscated file
compressed_file = os.path.join(file_folder_path,'dist',filename+'_compressed.py')
#print(compressed_file)
pyminifier_script = os.path.join(pythonfolder,'Scripts','pyminifier.exe')
compress_command = "\\""+sys.executable+"\\" \\""+pyminifier_script+"\\" --gzip -o \\"" +compressed_file + "\\" \\"" + obfusc_file+"\\""
#print(compress_command)
subprocess.call(compress_command, shell=True)
#compile_command = sys.executable+'-m py_compile "' + compressed_file+'"'
#print(compile_command)
#subprocess.call(compile_command , shell=True)
#encrypted_file_details['compiled_file'] = file
#compiled_file = os.path.join(file_folder_path,'dist','__pycache__',filename+'_compressed.cpython-37.pyc')
#encrypted_file_details[1] = compiled_file
#encrypted_file_list.append(encrypted_file_details)
#encrypted_file = filename + '_compressed.cpython-37_encrypted.pyc'
#encrypt_command = "python " + cwd + "\\\\Encrypt_Key_Dcrypt.py " + compiled_file + ' ' + encrypted_file + " --g -e"
#print(encrypt_command)
#subprocess.call(encrypt_command, shell=True)
#encrypted_file_list += ']'
#return(encrypted_file_list)
os.chdir(path)
except OSError as err:
print ("Creation of the directory %s failed "+str(err))
# Driver function
if __name__=="__main__":
path = sys.argv[1]
encrypt_files(path)
#(base) C:\\Himanshu\\DataPreprocessing>pyminify DataPreprocessing.py > DataPreprocessing_minify.py
#Obfuscate
#(base) C:\\Himanshu\\DataPreprocessing>pyarmor obfuscate C:\\Himanshu\\DataPreprocessing\\DataPreprocessing_minify.py
#Compression
#(base) C:\\Himanshu\\DataPreprocessing>pyminifier --gzip -o C:\\Himanshu\\DataPreprocessing\\dist\\DataPreprocessing_compressed.py C:\\Himanshu\\DataPreprocessing\\dist\\DataPreprocessing_minify.py
#(base) C:\\Himanshu\\DataPreprocessing>cd dist
#(base) C:\\Himanshu\\DataPreprocessing\\dist>python DataPreprocessing_compressed.py "DocumentText" "Label" 90 ".csv" "C:\\Himanshu\\DataAcquisition\\ClassificationDataNewBalanced.csv"
#Compiling compressed .py to .pyc file
#(base) C:\\Himanshu\\DataPreprocessing\\dist>python -m py_compile DataPreprocessing_compressed.py
#Encrypt .pyc file
#(base) C:\\Himanshu\\DataPreprocessing\\dist>python C:\\Himanshu\\Encrypt_Key_Dcrypt.py C:\\Himanshu\\DataPreprocessing\\dist\\__pycache__\\DataPreprocessing_compressed.cpython-36.pyc DataPreprocessing_compressed.cpython-36_encrypted.pyc --g -e
#Decrypt file
#(base) C:\\Himanshu\\DataPreprocessing\\dist>python C:\\Himanshu\\Encrypt_Key_Dcrypt.py DataPreprocessing_compressed.cpython-36_encrypted.pyc DataPreprocessing_compressed.cpython-36_decrypted.pyc --d
#Run decrypted file
#(base) C:\\Himanshu\\DataPreprocessing\\dist>python DataPreprocessing_compressed.cpython-36_decrypted.pyc "DocumentText" "Label" 90 ".csv" "C:\\Himanshu\\DataAcquisition\\ClassificationDataNewBalanced.csv"<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import platform
import sys
import subprocess
import glob
import shutil
import time
from aion_deployment.EncryptPythonSourceCode import encrypt_files
import json
def encrypt(alldirs):
for dir in alldirs:
try:
encrypt_files(dir)
except Exception as error_obj:
print("Exception in encrypting", error_obj)
print("-"*50)
def replace_by_compressed(alldirs):
for dir in alldirs:
try:
#print("Processing dir", dir)
files = [f for f in glob.glob(dir + "/*.py")]
secure_path = os.path.join(dir, 'SecuredScripts')
time.sleep(6)
for file in files:
try:
filename_w_ext = os.path.basename(file)
filename, file_extension = os.path.splitext(filename_w_ext)
if filename == "__init__":
continue
#print("Processing file", file)
file_folder_path = os.path.join(secure_path, filename, 'dist')
compressed_file_path = os.path.join(file_folder_path, filename+'_compressed.py')
shutil.copy(compressed_file_path, dir)
os.remove(file)
new_compressed_file_path = os.path.join(dir, filename+'_compressed.py')
target_file_path = os.path.join(dir, filename_w_ext)
os.rename(new_compressed_file_path, target_file_path)
if filename == 'aion_prediction':
shutil.copytree(os.path.join(file_folder_path, 'pytransform'), os.path.join(dir, 'pytransform'))
except Exception as error_obj:
print("Exception in file ", error_obj)
shutil.rmtree(secure_path)
except Exception as error_obj:
print("Exception in dir ", error_obj)
def start_Obfuscate(path):
project_path = path
subdirs = [dI for dI in os.listdir(project_path) if os.path.isdir(os.path.join(project_path,dI))]
alldirs = [
project_path,
]
    for subdir in subdirs:
if(subdir != 'pytransform'):
alldirs.append(os.path.join(project_path, subdir))
encrypt(alldirs)
replace_by_compressed(alldirs)
if __name__=="__main__":
project_path = sys.argv[1]
print("project_path", project_path)
subdirs = [dI for dI in os.listdir(project_path) if os.path.isdir(os.path.join(project_path,dI))]
alldirs = [
project_path,
]
for subdir in subdirs:
alldirs.append(os.path.join(project_path, subdir))
encrypt(alldirs)
print("*"*50)
replace_by_compressed(alldirs)
# python eion_compress.py "C:\\Users\\ashwani.s\\Desktop\\22April\\22April\\Mohita" "C:\\Users\\ashwani.s\\Desktop\\eion\\eion" > logfile.log
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pandas as pd
import numpy as np
import scipy
import warnings
import scipy.stats as st
import logging
import sys
import json
class inputdrift():
    def __init__(self,conf):
        self.log = logging.getLogger('eion')
        # Note: get_input_drift() below relies on self.features, a comma separated
        # list of numeric feature names expected to come from the supplied configuration.
def get_input_drift(self,ndf,hdf,outputfolder):
selectedColumns = self.features.split(',')
dataalertcount=0
distributionChangeColumns=""
distributionChangeMessage=[]
for i in range(0,len(selectedColumns)):
data1=hdf[selectedColumns[i]]
data2=ndf[selectedColumns[i]]
if(data1.dtype !="str" and data2.dtype !="str" ):
cumulativeData=data1.append(data2)
teststaticValue=teststatic(self,data1,data2)
if (teststaticValue < 0.05):
distributionName1,sse1=DistributionFinder(self,data1)
distributionName2,sse2=DistributionFinder(self,data2)
if(distributionName1 == distributionName2):
dataalertcount = dataalertcount
else:
dataalertcount = dataalertcount+1
distributionChangeColumns=distributionChangeColumns+selectedColumns[i]+","
changedColumn = {}
changedColumn['Feature'] = selectedColumns[i]
changedColumn['KS_Training'] = teststaticValue
changedColumn['Training_Distribution'] = distributionName1
changedColumn['New_Distribution'] = distributionName2
distributionChangeMessage.append(changedColumn)
else :
dataalertcount = dataalertcount
else :
response ="Selected Columns should be Numerical Values"
if(dataalertcount == 0):
resultStatus="Model is working as expected"
else :
resultStatus=json.dumps(distributionChangeMessage)
return(dataalertcount,resultStatus)
def DistributionFinder(self,data):
try:
distributionName =""
sse =0.0
KStestStatic=0.0
dataType=""
if(data.dtype == "float64"):
dataType ="Continuous"
elif(data.dtype =="int"):
dataType="Discrete"
elif(data.dtype =="int64"):
dataType="Discrete"
if(dataType == "Discrete"):
distributions= [st.bernoulli,st.binom,st.geom,st.nbinom,st.poisson]
index, counts = np.unique(data.astype(int),return_counts=True)
if(len(index)>=2):
best_sse = np.inf
y1=[]
total=sum(counts)
mean=float(sum(index*counts))/total
variance=float((sum(index**2*counts) -total*mean**2))/(total-1)
dispersion=mean/float(variance)
theta=1/float(dispersion)
                    r=mean*(float(theta)/(1-theta))
for j in counts:
y1.append(float(j)/total)
pmf1=st.bernoulli.pmf(index,mean)
pmf2=st.binom.pmf(index,len(index),p=mean/len(index))
pmf3=st.geom.pmf(index,1/float(1+mean))
pmf4=st.nbinom.pmf(index,mean,r)
pmf5=st.poisson.pmf(index,mean)
sse1 = np.sum(np.power(y1 - pmf1, 2.0))
sse2 = np.sum(np.power(y1 - pmf2, 2.0))
sse3 = np.sum(np.power(y1 - pmf3, 2.0))
sse4 = np.sum(np.power(y1 - pmf4, 2.0))
sse5 = np.sum(np.power(y1- pmf5, 2.0))
sselist=[sse1,sse2,sse3,sse4,sse5]
for i in range(0,len(sselist)):
if best_sse > sselist[i] > 0:
best_distribution = distributions[i].name
best_sse = sselist[i]
elif (len(index) == 1):
best_distribution = "Constant Data-No Distribution"
best_sse = 0.0
distributionName =best_distribution
sse=best_sse
elif(dataType == "Continuous"):
distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta]
best_distribution = st.norm.name
best_sse = np.inf
datamin=data.min()
datamax=data.max()
nrange=datamax-datamin
y, x = np.histogram(data.astype(float), bins='auto', density=True)
x = (x + np.roll(x, -1))[:-1] / 2.0
for distribution in distributions:
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
params = distribution.fit(data.astype(float))
# Separate parts of parameters
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Calculate fitted PDF and error with fit in distribution
pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
sse = np.sum(np.power(y - pdf, 2.0))
if(best_sse >sse > 0):
best_distribution = distribution.name
best_sse = sse
distributionName =best_distribution
sse=best_sse
except:
response = str(sys.exc_info()[0])
message='Job has Failed'+response
print(message)
return distributionName,sse
##KStestStatic -pvalue finding
def teststatic(self,data1,data2):
try:
teststatic =st.ks_2samp(data1,data2)
pValue=0.0
scipyVersion =scipy.__version__
if(scipyVersion <= "0.14.1"):
pValue =teststatic[1]
else:
pValue =teststatic.pvalue
except:
response = str(sys.exc_info()[0])
print("Input Drift Job Failed "+response)
return pValue
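# Illustrative usage sketch (not part of the module above): a minimal, hedged
# example of the drift check performed by get_input_drift()/teststatic(),
# using scipy's two sample KS test directly. The sample values below are
# invented for demonstration; the 0.05 threshold mirrors the code above.
if __name__ == '__main__':
    import pandas as pd
    import scipy.stats as st
    historical = pd.Series([10.2, 11.1, 9.8, 10.5, 10.9, 11.3])
    incoming = pd.Series([14.8, 15.2, 15.9, 14.1, 16.3, 15.5])
    statistic, p_value = st.ks_2samp(historical, incoming)
    if p_value < 0.05:
        print('distribution shift detected for this feature')
    else:
        print('no significant drift for this feature')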
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from pathlib import Path
from AION.prediction_package.imports import importModule
from AION.prediction_package.aion_prediction import aionPrediction
from AION.prediction_package.utility import TAB_CHAR
from AION.prediction_package import utility
from AION.prediction_package.base import deployer
from AION.prediction_package import common
import numpy as np
def get_deployer( params):
if params['training']['algo'] == 'ARIMA':
return arima(params)
elif params['training']['algo'] == 'LSTM':
return lstm(params)
elif params['training']['algo'] == 'ENCODER_DECODER_LSTM_MVI_UVO':
return lstmencdec_mviuvo(params)
elif params['training']['algo'] == 'MLP':
return mlp(params)
elif params['training']['algo'] == 'VAR':
return var(params)
elif params['training']['algo'] == 'FBPROPHET':
return fbprophet(params)
else:
raise ValueError(f"Algorithm {params['training']['algo']} for time series forecasting is not supported")
def _profiler_code(params, importer):
"""
This will create the profiler file based on the config file.
separated file is created as profiler is required for input drift also.
"""
imported_modules = [
{'module': 'json', 'mod_from': None, 'mod_as': None},
{'module': 'scipy', 'mod_from': None, 'mod_as': None},
{'module': 'joblib', 'mod_from': None, 'mod_as': None},
{'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}
]
utility.import_modules(importer, imported_modules)
if 'code' in params['profiler'].get('preprocess',{}).keys():
code = params['profiler']['preprocess']['code']
else:
code = ""
code += """
class inputprofiler():
"""
init_code = """
def __init__(self):
"""
init_code += """
# preprocessing
preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl'
if not preprocess_path.exists():
raise ValueError(f'Preprocess model file not found: {preprocess_path}')
self.profiler = joblib.load(preprocess_path)
"""
run_code = """
def run(self,df):
df = df.replace(r'^\\s*$', np.NaN, regex=True)
"""
if 'code' in params['profiler'].get('preprocess',{}).keys():
run_code += """
df = preprocess( df)"""
if params['profiler'].get('unpreprocessed_columns'):
run_code += f"""
unpreprocessed_data = df['{params['profiler']['unpreprocessed_columns'][0]}']
df.drop(['{params['profiler']['unpreprocessed_columns'][0]}'], axis=1,inplace=True)
"""
if params['profiler'].get('force_numeric_conv'):
run_code += f"""
df[{params['profiler']['force_numeric_conv']}] = df[{params['profiler']['force_numeric_conv']}].apply(pd.to_numeric,errors='coerce')"""
run_code += _profiler_main_code(params)
if params['profiler'].get('unpreprocessed_columns'):
run_code += f"""
df['{params['profiler'].get('unpreprocessed_columns')[0]}'] = unpreprocessed_data
"""
run_code += """ return df
"""
utility.import_modules(importer, imported_modules)
import_code = importer.getCode()
return import_code + code + init_code + run_code
def _profiler_main_code(params):
code = f"""
df = self.profiler.transform(df)
columns = {params['profiler']['output_features']}
if isinstance(df, scipy.sparse.spmatrix):
df = pd.DataFrame(df.toarray(), columns=columns)
else:
df = pd.DataFrame(df, columns=columns)
"""
return code
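# Illustrative sketch (not emitted by the generators above): roughly what the
# rendered inputprofiler.run() does at prediction time, assuming a joblib
# persisted preprocessing pipeline and a known output feature list. The
# argument names here are assumptions made only for this example.
def _example_profiler_run(df, preprocess_pipe, output_features):
    import pandas as pd
    import scipy.sparse
    transformed = preprocess_pipe.transform(df)
    # sparse pipeline output is densified before rebuilding the dataframe,
    # mirroring the template produced by _profiler_main_code() above
    if isinstance(transformed, scipy.sparse.spmatrix):
        return pd.DataFrame(transformed.toarray(), columns=output_features)
    return pd.DataFrame(transformed, columns=output_features)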
class arima( deployer):
def __init__(self, params={}):
super().__init__( params)
self.name = 'timeseriesforecasting'
def profiler_code( self):
imported_modules = [
{'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
]
importer = importModule()
utility.import_modules(importer, imported_modules)
code = """
class inputprofiler():
def __init__(self):
pass
def run( self,df):
df = df.replace(r'^\\s*$', np.NaN, regex=True)
return df[['noofforecasts']]
"""
        return importer.getCode() + code
def feature_engg_code(self):
self.importer.addModule(module='pandas',mod_as='pd')
return f"""
class selector():
def __init__(self):
pass
def run(self, df):
return df
"""
    def training_code(
]==2:
prediction[col] = (datasetdf[col].iloc[-1]-datasetdf[col].iloc[-2]) + prediction[col].cumsum()
prediction[col] = datasetdf[col].iloc[-1] + prediction[col].cumsum()
prediction = pred
return(prediction)
def run(self,raw_df,df):
df = self.invertTransformation(df)
df = df.to_json(orient='records',double_precision=2)
outputjson = {{"status":"SUCCESS","data":json.loads(df)}}
return(json.dumps(outputjson))
"""
class fbprophet( deployer):
def __init__(self, params={}):
super().__init__( params)
self.name = 'timeseriesforecasting'
def profiler_code( self):
imported_modules = [
{'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
]
importer = importModule()
utility.import_modules(importer, imported_modules)
code = """
class inputprofiler():
def __init__(self):
pass
def run( self,df):
df = df.replace(r'^\\s*$', np.NaN, regex=True)
return df[['noofforecasts']]
"""
return importer.getCode() + code
def feature_engg_code(self):
self.importer.addModule(module='pandas',mod_as='pd')
return f"""
class selector():
def __init__(self):
pass
def run(self, df):
return df
"""
def training_code( self):
self.importer.addModule(module='pandas',mod_as='pd')
self.importer.addModule(module='Path',mod_from='pathlib')
self.importer.addModule(module='joblib')
code = f"""
class trainer():
def __init__(self):
model_file = (Path(__file__).parent/"model")/"{self.params['training']['model_file']}"
if not model_file.exists():
raise ValueError(f'Trained model file not found: {{model_file}}')
self.model = joblib.load(model_file)
"""
code += f"""
def run(self,df):
sessonal_freq = '{self.params['training']['sessonal_freq']}'
ts_prophet_future = self.model.make_future_dataframe(periods=int(df["noofforecasts"][0]),freq=sessonal_freq,include_history = False)
"""
if (self.params['training']['additional_regressors']):
code += f"""
additional_regressors={self.params['training']['additional_regressors']}
ts_prophet_future[additional_regressors] = dataFrame[additional_regressors]
ts_prophet_future.reset_index(drop=True)
ts_prophet_future=ts_prophet_future.dropna()
"""
code += """
train_forecast = self.model.predict(ts_prophet_future)
prophet_forecast_tail=train_forecast[[\\'ds\\', \\'yhat\\', \\'yhat_lower\\',\\'yhat_upper\\']].tail( int(df["noofforecasts"][0]))
return(prophet_forecast_tail)"""
return code
def formatter_code(self):
self.importer.addModule('json')
self.importer.addModule('pandas', mod_as='pd')
return """
class output_format():
def __init__( self):
pass
def run(self,raw_df,df):
df = df.to_json(orient='records')
outputjson = {"status":"SUCCESS","data":json.loads(df)}
return(json.dumps(outputjson))
"""
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
# -*- coding: utf-8 -*-
import logging
logging.getLogger('tensorflow').disabled = True
import json
import mlflow
import mlflow.sklearn
import mlflow.sagemaker as mfs
# from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
# from sklearn import datasets
import time
import numpy as np
# Load dataset
# from sklearn.datasets import load_iris
import pickle
# Load the pickled model
# from matplotlib import pyplot
import sys
import os
import boto3
import subprocess
import os.path
from os.path import expanduser
import platform
from pathlib import Path
class aionMlopsService:
def __init__(self,model,mlflowtosagemakerDeploy,mlflowtosagemakerPushOnly,mlflowtosagemakerPushImageName,mlflowtosagemakerdeployModeluri,experiment_name,mlflow_modelname,awsaccesskey_id,awssecretaccess_key,aws_session_token,mlflow_container_name,aws_region,aws_id,iam_sagemakerfullaccess_arn,sm_app_name,sm_deploy_option,delete_ecr_repository,ecrRepositoryName):
try:
self.model=model
self.mlflowtosagemakerDeploy=mlflowtosagemakerDeploy
self.mlflowtosagemakerPushOnly=str(mlflowtosagemakerPushOnly)
self.mlflowtosagemakerPushImageName=str(mlflowtosagemakerPushImageName)
self.mlflowtosagemakerdeployModeluri=str(mlflowtosagemakerdeployModeluri)
self.experiment_name=experiment_name
self.mlflow_modelname=mlflow_modelname
self.awsaccesskey_id=awsaccesskey_id
self.awssecretaccess_key=awssecretaccess_key
self.aws_session_token=aws_session_token
self.mlflow_container_name=mlflow_container_name
self.aws_region=aws_region
self.aws_id=aws_id
self.iam_sagemakerfullaccess_arn=iam_sagemakerfullaccess_arn
self.sm_app_name=sm_app_name
self.sm_deploy_option=sm_deploy_option
self.delete_ecr_repository=delete_ecr_repository
self.ecrRepositoryName=ecrRepositoryName
from appbe.dataPath import LOG_LOCATION
sagemakerLogLocation = LOG_LOCATION
try:
os.makedirs(sagemakerLogLocation)
except OSError as e:
if (os.path.exists(sagemakerLogLocation)):
pass
else:
raise OSError('sagemakerLogLocation error.')
self.sagemakerLogLocation=str(sagemakerLogLocation)
filename_mlops = 'mlopslog_'+str(int(time.time()))
filename_mlops=filename_mlops+'.log'
# filename = 'mlopsLog_'+Time()
filepath = os.path.join(self.sagemakerLogLocation, filename_mlops)
logging.basicConfig(filename=filepath, format='%(message)s',filemode='w')
# logging.basicConfig(filename="uq_logging.log", format='%(asctime)s %(message)s',filemode='w')
# logging.basicConfig(filename="uq_logging.log", format=' %(message)s',filemode='w')
# logging.basicConfig(filename='uq_logging.log', encoding='utf-8', level=logging.INFO)
self.log = logging.getLogger('aionMLOps')
self.log.setLevel(logging.DEBUG)
# mlflow.set_experiment(self.experiment_name)
except Exception as e:
self.log.info('<!------------- mlflow model INIT Error ---------------> '+str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def mlflowSetPath(self,path):
track_dir=os.path.join(path,'mlruns')
uri="file:"+str(Path(track_dir))
return uri
#Currently not used this delete ecr repository option
def ecr_repository_delete(self,rep_name):
# import subprocess
client = boto3.client('ecr')
repositories = client.describe_repositories()
ecr_delete_rep=client.delete_repository(registryId=self.aws_id,repositoryName=self.ecrRepositoryName,force=True)
mlflow_ecr_delete=subprocess.run(['aws', 'ecr', 'delete-repository','--repository-name',rep_name,'||','true'])
self.log.info('Success: deleted aws ecr repository which contains mlops image.')
def check_sm_deploy_status(self,app_name):
sage_client = boto3.client('sagemaker', region_name=self.aws_region)
endpoint_description = sage_client.describe_endpoint(EndpointName=app_name)
endpoint_status = endpoint_description["EndpointStatus"]
try:
failure_reason=endpoint_description["FailureReason"]
self.log.info("sagemaker end point creation failure reason is: "+str(failure_reason))
except:
pass
endpoint_status=str(endpoint_status)
return endpoint_status
def invoke_sm_endpoint(self,app_name, input_json):
client = boto3.session.Session().client("sagemaker-runtime", self.aws_region)
response = client.invoke_endpoint(
EndpointName=app_name,
Body=input_json,
ContentType='application/json; format=pandas-split',
)
# preds = response['Body'].read().decode("ascii")
preds = response['Body'].read().decode("ascii")
preds = json.loads(preds)
# print("preds: {}".format(preds))
return preds
def predict_sm_app_endpoint(self,X_test):
#print(X_test)
import pandas as pd
prediction=None
AWS_ACCESS_KEY_ID=str(self.awsaccesskey_id)
AWS_SECRET_ACCESS_KEY=str(self.awssecretaccess_key)
AWS_SESSION_TOKEN=str(self.aws_session_token)
region = str(self.aws_region)
#Existing model deploy options
# mlflowtosagemakerPushImageName=str(self.mlflowtosagemakerPushImageName)
# mlflowtosagemakerdeployModeluri=str(self.mlflowtosagemakerdeployModeluri)
try:
import subprocess
cmd = 'aws configure set region_name '+region
os.system(cmd)
cmd = 'aws configure set aws_access_key_id '+AWS_ACCESS_KEY_ID
os.system(cmd)
cmd = 'aws configure set aws_secret_access_key '+AWS_SECRET_ACCESS_KEY
os.system(cmd)
'''
aws_region=subprocess.run(['aws', 'configure', 'set','region_name',region])
aws_accesskeyid=subprocess.run(['aws', 'configure', 'set','aws_access_key_id',AWS_ACCESS_KEY_ID])
aws_secretaccesskey=subprocess.run(['aws', 'configure', 'set','aws_secret_access_key',AWS_SECRET_ACCESS_KEY])
'''
except:
pass
#Create a session for aws communication using aws boto3 lib
# s3_client = boto3.client('ecr',aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region)
# s3 = boto3.resource('ecr', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key= AWS_SECRET_ACCESS_KEY)
session = boto3.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region)
#X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=2)
# query_input = pd.DataFrame(X_test).iloc[[1,5]].to_json(orient="split")
try:
query_input = pd.DataFrame(X_test).to_json(orient="split")
#print(query_input)
prediction = self.invoke_sm_endpoint(app_name=self.sm_app_name, input_json=query_input)
# self.log.info("sagemaker end point Prediction: \\n"+str(prediction))
except Exception as e:
print(e)
return prediction
def deleteSagemakerApp(self,app_name,region):
# import mlflow.sagemaker as mfs
# region = 'ap-south-1'
# app_name = 'aion-demo-app'
mfs.delete(app_name=app_name,region_name=region, archive=False,synchronous=True, timeout_seconds=300)
# print("AION mlops sagemaker application endpoint is deleted....\\n")
self.log.info('AION mlops sagemaker application endpoint is deleted, application name is: '+str(app_name))
def deployModel2sagemaker(self,mlflow_container_name,tag_id,model_path):
region = str(self.aws_region)
aws_id = str(self.aws_id)
iam_sagemakerfullaccess_arn = str(self.iam_sagemakerfullaccess_arn)
app_name = str(self.sm_app_name)
model_uri = str(model_path)
app_status=False
mlflow_root_dir = None
try:
            os.chdir(str(self.sagemakerLogLocation))
mlflow_root_dir = os.getcwd()
self.log.info('mlflow root dir: '+str(mlflow_root_dir))
except:
self.log.info("path issue.")
try:
c_status=self.check_sm_deploy_status(app_name)
#if ((c_status == "Failed") or (c_status == "OutOfService")):
if ((c_status == "Failed") or (c_status.lower() == "failed")):
app_status=False
self.log.info("Sagemaker endpoint status: Failed.\\n")
mfs.delete(app_name=app_name,region_name=region, archive=False,synchronous=True, timeout_seconds=300)
elif ((c_status.lower() == "inservice") or (c_status == "InService")):
app_status=True
self.log.info("Sagemaker endpoint status: InService. Running sagemaker endpoint name: \\n"+str(app_name))
else:
app_status=False
pass
except:
# print("deploy status error.\\n")
pass
#aws ecr model app_name should contain only [[a-zA-Z0-9-]]
import re
if app_name:
pattern = re.compile("[A-Za-z0-9-]+")
# if found match (entire string matches pattern)
if pattern.fullmatch(app_name) is not None:
#print("Found match: ")
pass
else:
app_name = 'aion-demo-app'
else:
app_name = 'aion-demo-app'
mlflow_image=mlflow_container_name+':'+tag_id
image_url = aws_id + '.dkr.ecr.' + region + '.amazonaws.com/' + mlflow_image
deploy_option="create"
self.log.info('deploy_option: \\n'+str(deploy_option))
if (deploy_option.lower() == "create"):
# Other deploy modes: mlflow.sagemaker.DEPLOYMENT_MODE_ADD,mlflow.sagemaker.DEPLOYMENT_MODE_REPLACE
if not (app_status):
try:
mfs.deploy(app_name=app_name,model_uri=model_uri,region_name=region,mode="create",execution_role_arn=iam_sagemakerfullaccess_arn,image_url=image_url)
self.log.info('sagemaker endpoint created and model deployed. Application name is: \\n'+str(app_name))
except:
                    self.log.info('Issue creating the endpoint application. Please check the connection and aws credentials \\n')
else:
self.log.info('Sagemaker application with user endpoint name already running.Please check. Please delete the old endpoint with same name.\\n')
elif (deploy_option.lower() == "delete"):
# import mlflow.sagemaker as mfs
# # region = 'ap-south-1'
# # app_name = 'aion-demo-app'
# mfs.delete(app_name=app_name,region_name=region, archive=False,synchronous=True, timeout_seconds=300)
# print("Mlflow sagemaker application endpoint is deleted....\\n")
# self.log.info('Mlflow sagemaker application endpoint is deleted, application name is: '+str(app_name))
pass
elif (deploy_option.lower() == "add"):
pass
elif (deploy_option.lower() == "replace"):
pass
else:
pass
return app_status
def mlflow2sagemaker_deploy(self):
self.log.info('<!------------- Inside AION mlops to sagemaker communication and deploy process. ---------------> ')
deploy_status=False
app_name = str(self.sm_app_name)
self.log.info('Sagemaker Application Name: '+str(app_name))
uri_mlflow=self.mlflowSetPath(self.sagemakerLogLocation)
mlflow.set_tracking_uri(uri_mlflow)
mlops_trackuri=mlflow.get_tracking_uri()
mlops_trackuri=str(mlops_trackuri)
self.log.info('mlops tracking uri: '+str(mlops_trackuri))
localhost_deploy=False
try:
#Loading aion model to deploy in sagemaker
mlflow.set_experiment(self.experiment_name)
self.log.info('Endpoint Name: '+str(self.experiment_name))
# Assume, the model already loaded from joblib in aionmlflow2smInterface.py file.
aionmodel2deploy=self.model
# run_id = None
# experiment_id=None
# Use the loaded pickled model to make predictions
# pred = knn_from_pickle.predict(X_test)
with mlflow.start_run(run_name='AIONMLOps') as run:
# aionmodel2deploy.fit(X_train, y_train)
# predictions = aionmodel2deploy.predict(X_test)
mlflow.sklearn.log_model(aionmodel2deploy, self.mlflow_modelname)
run_id = run.info.run_uuid
experiment_id = run.info.experiment_id
self.log.info('AION mlops experiment run_id: '+str(run_id))
self.log.info('AION mlops experiment experiment_id: '+str(experiment_id))
self.log.info('AION mlops experiment model_name: '+str(self.mlflow_modelname))
                artifact_uri = mlflow.get_artifact_uri()
# print("1.artifact_uri: \\n",artifact_uri)
mlflow.end_run()
#If we need, we can check the mlflow experiments.
# try:
# mlflow_client = mlflow.tracking.MlflowClient('./mlruns')
# exp_list = mlflow_client.list_experiments()
# except:
# pass
#print("mlflow exp_list: \\n",exp_list)
mlflow_modelname=str(self.mlflow_modelname)
mlops_trackuri=mlops_trackuri.replace('file:','')
mlops_trackuri=str(mlops_trackuri)
# mlflow_root_dir = os.getcwd()
mlflow_root_dir = None
try:
os.chdir(str(self.sagemakerLogLocation))
mlflow_root_dir = os.getcwd()
self.log.info('mlflow root dir: '+str(mlflow_root_dir))
except:
self.log.info("path issue.")
model_path = 'mlruns/%s/%s/artifacts/%s' % (experiment_id, run_id,self.mlflow_modelname)
# model_path=mlops_trackuri+'\\\\%s\\\\%s\\\\artifacts\\\\%s' % (experiment_id, run_id,mlflow_modelname)
self.log.info("local host aion mlops model_path is: "+str(model_path))
time.sleep(2)
#print("Environment variable setup in the current working dir for aws sagemaker cli connection... \\n")
self.log.info('Environment variable setup in the current working dir for aws sagemaker cli connection... \\n ')
AWS_ACCESS_KEY_ID=str(self.awsaccesskey_id)
AWS_SECRET_ACCESS_KEY=str(self.awssecretaccess_key)
AWS_SESSION_TOKEN=str(self.aws_session_token)
region = str(self.aws_region)
#Existing model deploy options
mlflowtosagemakerPushImageName=str(self.mlflowtosagemakerPushImageName)
mlflowtosagemakerdeployModeluri=str(self.mlflowtosagemakerdeployModeluri)
import subprocess
cmd = 'aws configure set region_name '+region
os.system(cmd)
cmd = 'aws configure set aws_access_key_id '+AWS_ACCESS_KEY_ID
os.system(cmd)
cmd = 'aws configure set aws_secret_access_key '+AWS_SECRET_ACCESS_KEY
os.system(cmd)
#Create a session for aws communication using aws boto3 lib
# s3_client = boto3.client('ecr',aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region)
# s3 = boto3.resource('ecr', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key= AWS_SECRET_ACCESS_KEY)
session = boto3.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region)
# session = boto3.session.Session(
# aws_access_key_id=AWS_ACCESS_KEY_ID,
# aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
# aws_session_token=AWS_SESSION_TOKEN
# )
# awsclient = session.resource('ecr')
# s3 = session.resource('s3')
self.log.info('aws environment variable setup done... \\n')
try:
os.chdir(mlflow_root_dir)
except FileNotFoundError:
self.log.info('Directory does not exist. '+str(mlflow_root_dir))
except NotADirectoryError:
self.log.info('model_path is not a directory. '+str(mlflow_root_dir))
except PermissionError:
self.log.info('Issue in permissions to change to model dir. '+str(mlflow_root_dir))
mlflow_container_name=str(self.mlflow_container_name)
mlflow_version=mlflow.__version__
tag_id=mlflow_version
if (self.mlflowtosagemakerPushOnly.lower() == "true"):
self.log.info('Selected option is <Deploy existing model to sagemaker> \\n')
aws_id=str(self.aws_id)
arn=str(self.iam_sagemakerfullaccess_arn)
mlflow_image=mlflow_container_name+':'+tag_id
image_url = aws_id+'.dkr.ecr.'+region+'.amazonaws.com/'+mlflow_image
# print("image_url:========= \\n",image_url)
deploy_status=True
try:
model_path=mlflowtosagemakerdeployModeluri
# ##We need to run mlflow docker container command in the artifacts->model directory inside mlruns.
self.log.info('Deploy existing model container-Model path given by user: '+str(model_path))
try:
os.chdir(model_path)
except FileNotFoundError:
self.log.info('Directory does not exist. '+str(model_path))
except NotADirectoryError:
self.log.info('model_path is not a directory. '+str(model_path))
except PermissionError:
self.log.info('Issue in permissions to change to model dir. '+str(model_path))
try:
mfs.push_image_to_ecr(image=mlflowtosagemakerPushImageName)
deploy_status=True
self.log.info('AION mlops pushed the docker container to aws ecr. \\n ')
except:
self.log.info("error in pushing existing container to ecr.\\n")
deploy_status=False
time.sleep(2)
#Now,change the working dir to root dir,because now deploy needs full mlruns to model name dir.
try:
# print(" Changing directory to mlflow root dir....\\n")
os.chdir(mlflow_root_dir)
except FileNotFoundError:
                    self.log.info('model path does not exist. '+str(mlflow_root_dir))
except NotADirectoryError:
self.log.info('model path is not a directory. '+str(mlflow_root_dir))
# print("{0} is not a directory".format(mlflow_root_dir))
except PermissionError:
self.log.info('Issue in permissions to change to model dir. '+str(mlflow_root_dir))
# self.deployModel2sagemaker(mlflowtosagemakerPushImageName,tag_id,mlflowtosagemakerdeployModeluri)
try:
if (deploy_status):
self.deployModel2sagemaker(mlflowtosagemakerPushImageName,tag_id,mlflowtosagemakerdeployModeluri)
self.log.info('AION creates docker container and push the container into aws ecr.. ')
time.sleep(2)
except:
self.log.info('AION deploy error.check connection and aws config parameters. ')
deploy_status=False
# self.log.info('model deployed in sagemaker. ')
except Exception as e:
self.log.info('AION mlops failed to push docker container in aws ecr, check configuration parameters. \\n'+str(e))
elif (self.mlflowtosagemakerPushOnly.lower() == "false"):
if (self.mlflowtosagemakerDeploy.lower() == "true"):
self.log.info('Selected option is <Create and Deploy model> \\n')
deploy_status=True
try:
# ##We need to run mlflow docker container command in the artifacts->model directory inside mlruns.
try:
os.chdir(model_path)
except FileNotFoundError:
self.log.info('Directory does not exist. '+str(model_path))
except NotADirectoryError:
self.log.info('model_path is not a directory. '+str(model_path))
except PermissionError:
self.log.info('Issue in permissions to change to model dir. '+str(model_path))
try:
mlflow_container_push=subprocess.run(['mlflow', 'sagemaker', 'build-and-push-container','--build','--push','--container',mlflow_container_name])
self.log.info('AION mlops creates docker container and push the container into aws ecr.. ')
deploy_status=True
time.sleep(2)
except:
self.log.info('error in pushing aion model container to sagemaker, please check the connection between local host to aws server.')
deploy_status=False
self.log.info('Now deploying the model container to sagemaker starts....\\n ')
# Once docker push completes, again going back to mlflow parent dir for deployment
#Now,change the working dir to root dir,because now deploy needs full mlruns to model name dir.
try:
os.chdir(mlflow_root_dir)
                    except FileNotFoundError:
self.log.info('model_path does not exist. '+str(mlflow_root_dir))
except NotADirectoryError:
self.log.info('model_path is not a directory. '+str(mlflow_root_dir))
except PermissionError:
self.log.info('Issue in permissions to change to model dir. '+str(mlflow_root_dir))
# app_name = str(self.sm_app_name)
try:
if (deploy_status):
self.deployModel2sagemaker(mlflow_container_name,tag_id,model_path)
except:
self.log.info('mlops deploy error.check connection')
deploy_status=False
except Exception as e:
exc = {"status":"FAIL","message":str(e).strip('"')}
out_exc = json.dumps(exc)
                    self.log.info('mlflow failed to create the docker container. Please check the aws iam/ecr permission setup and the aws id, access_key, secret key values for aion.\\n')
elif(self.mlflowtosagemakerDeploy.lower() == "false"):
deploy_status=False
localhost_deploy=True
self.log.info('Selected option is <Create AION mlops container in local host .> \\n')
self.log.info("User selected create-Deploy sagemaker option as False,")
self.log.info("Creates the AION mlops-sagemaker container locally starting,but doesn't push into aws ecr and deploy in sagemaker. Check the container in docker repository. ")
try:
# ##We need to run AION mlops docker container command in the artifacts->model directory inside mlruns.
try:
os.chdir(model_path)
self.log.info('After change to AION mlops model dir, cwd: '+str(model_path))
except FileNotFoundError:
self.log.info('Directory does not exist. '+str(model_path))
except NotADirectoryError:
self.log.info('model_path is not a directory. '+str(model_path))
except PermissionError:
self.log.info('Issue in permissions to change to model dir. '+str(model_path))
# mlflow_container_local=subprocess.run(['AION mlops', 'sagemaker', 'build-and-push-container','--build','--no-push','--container',mlflow_container_name])
try:
if not (deploy_status):
mlflow_container_local=subprocess.run(['mlflow', 'sagemaker', 'build-and-push-container','--build','--no-push','--container',mlflow_container_name])
                            self.log.info('AION created a local host based docker container and pushed it to the local docker repository. Check with <docker images> command.\\n ')
localhost_deploy=True
time.sleep(2)
except:
self.log.info('error in pushing aion model container to sagemaker, please check the connection between local host to aws server.')
deploy_status=False
localhost_deploy=False
# print("AION mlops creates docker container and push the container into aws ecr.\\n")
self.log.info('AION mlops creates docker container and stored locally... ')
time.sleep(2)
except Exception as e:
localhost_deploy=False
# print("mlflow failed to creates docker container please check the aws iam,ecr permission setup, aws id access_key,secret key values for aion.\\n")
                    self.log.info('AION mlops failed to create the docker container on the local machine.\\n'+str(e))
else:
self.log.info('Deploy option not selected, Please check. ')
localhost_deploy=False
deploy_status=False
else:
pass
localhost_container_status="Notdeployed"
mlflow2sm_deploy_status="Notdeployed"
if localhost_deploy:
localhost_container_status="success"
mlflow2sm_deploy_status="Notdeployed"
# print("AION creates local docker container successfully.Please check in docker repository.")
self.log.info("AION creates local docker container successfully.Please check in docker repository.")
# else:
# localhost_container_status="failed"
# # print("AION failed to create local docker container successfully.Please check in docker repository.")
# self.log.info("AION failed to create local docker container successfully.Please check in docker repository.")
if (deploy_status):
# Finally checking whether mlops model is deployed to sagemaker or not.
app_name = str(self.sm_app_name)
deploy_s = self.check_sm_deploy_status(app_name)
if (deploy_s == "InService"):
# print("AION mlops model is deployed at aws sagemaker, use application name(app_name) and region to access.\\n")
self.log.info('AION mlops model is deployed at aws sagemaker, use application name(app_name) and region to access.\\n'+str(app_name))
mlflow2sm_deploy_status="success"
localhost_container_status="Notdeployed"
else:
# print("AION Mlflow model not able to deploy at aws sagemaker\\n")
self.log.info('AION mlops model not able to deploy at aws sagemaker.\\n')
mlflow2sm_deploy_status="failed"
localhost_container_status="Notdeployed"
# else:
# mlflow2sm_deploy_status="None"
return mlflow2sm_deploy_status,localhost_container_status
except Exception as inst:
exc = {"status":"FAIL","message":str(inst).strip('"')}
out_exc = json.dumps(exc)
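# Illustrative usage sketch (not invoked anywhere): how a trained estimator
# could be handed to aionMlopsService and pushed through the mlflow-to-
# sagemaker flow above. Every value below is a placeholder assumption; real
# AWS credentials, ARNs and registry names must come from the caller's config.
def _example_mlops_deploy(trained_model):
    service = aionMlopsService(
        model=trained_model,
        mlflowtosagemakerDeploy='True',
        mlflowtosagemakerPushOnly='False',
        mlflowtosagemakerPushImageName='mlflow-pyfunc',
        mlflowtosagemakerdeployModeluri='',
        experiment_name='aion_experiment',
        mlflow_modelname='aion_model',
        awsaccesskey_id='<aws-access-key-id>',
        awssecretaccess_key='<aws-secret-access-key>',
        aws_session_token='<aws-session-token>',
        mlflow_container_name='mlflow-pyfunc',
        aws_region='ap-south-1',
        aws_id='<aws-account-id>',
        iam_sagemakerfullaccess_arn='<sagemaker-execution-role-arn>',
        sm_app_name='aion-demo-app',
        sm_deploy_option='create',
        delete_ecr_repository='False',
        ecrRepositoryName='aion-mlops')
    # returns (mlflow2sm_deploy_status, localhost_container_status)
    return service.mlflow2sagemaker_deploy()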
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> from kafka import KafkaConsumer
from json import loads
import pandas as pd
import json
import os,sys
import time
import multiprocessing
from os.path import expanduser
import platform
import datetime
modelDetails = {}
class Process(multiprocessing.Process):
def __init__(self, modelSignature,jsonData,predictedData,modelpath):
super(Process, self).__init__()
self.config = jsonData
self.modelSignature = modelSignature
self.data = predictedData
self.modelpath = modelpath
def run(self):
#data = pd.json_normalize(self.data)
minotoringService = self.config['minotoringService']['url']
trainingdatalocation = self.config['trainingDataLocation'][self.modelSignature]
#filetimestamp = 'AION_'+str(int(time.time()))+'.csv'
#data.to_csv(dataFile, index=False)
inputFieldsJson = {"trainingDataLocation":trainingdatalocation,"currentDataLocation":self.data}
inputFieldsJson = json.dumps(inputFieldsJson)
ser_url = minotoringService+self.modelSignature+'/monitoring'
driftTime = datetime.datetime.now()
import requests
try:
response = requests.post(ser_url, data=inputFieldsJson,headers={"Content-Type":"application/json",})
outputStr=response.content
outputStr = outputStr.decode('utf-8')
outputStr = outputStr.strip()
decoded_data = json.loads(outputStr)
print(decoded_data)
status = decoded_data['status']
msg = decoded_data['data']
except Exception as inst:
if 'Failed to establish a new connection' in str(inst):
status = 'Fail'
msg = 'AION Service needs to be started'
else:
status = 'Fail'
msg = 'Error during Drift Analysis'
statusFile = os.path.join(self.modelpath,self.modelSignature+'_status.csv')
df = pd.DataFrame(columns = ['dateTime', 'status', 'msg'])
df = df.append({'dateTime' : driftTime, 'status' : status, 'msg' : msg},ignore_index = True)
print(df)
if (os.path.exists(statusFile)):
df.to_csv(statusFile, mode='a', header=False,index=False)
else:
df.to_csv(statusFile, header=True,index=False)
def launch_kafka_consumer():
from appbe.dataPath import DATA_DIR
configfile = os.path.join(os.path.dirname(__file__),'..','config','kafkaConfig.conf')
with open(configfile,'r',encoding='utf-8') as f:
jsonData = json.load(f)
f.close()
kafkaIP=jsonData['kafkaCluster']['ip']
kafkaport = jsonData['kafkaCluster']['port']
topic = jsonData['kafkaCluster']['topic']
kafkaurl = kafkaIP+':'+kafkaport
if jsonData['database']['csv'] == 'True':
database = 'csv'
elif jsonData['database']['mySql'] == 'True':
database = 'mySql'
else:
database = 'csv'
kafkaPath = os.path.join(DATA_DIR,'kafka')
if not (os.path.exists(kafkaPath)):
try:
os.makedirs(kafkaPath)
except OSError as e:
pass
consumer = KafkaConsumer(topic,bootstrap_servers=[kafkaurl],auto_offset_reset='earliest',enable_auto_commit=True,group_id='my-group',value_deserializer=lambda x: loads(x.decode('utf-8')))
for message in consumer:
message = message.value
data = message['data']
data = pd.json_normalize(data)
modelname = message['usecasename']
version = message['version']
modelSignature = modelname+'_'+str(version)
modelpath = os.path.join(kafkaPath,modelSignature)
try:
os.makedirs(modelpath)
except OSError as e:
pass
secondsSinceEpoch = time.time()
if modelSignature not in modelDetails:
modelDetails[modelSignature] = {}
modelDetails[modelSignature]['startTime'] = secondsSinceEpoch
if database == 'csv':
csvfile = os.path.join(modelpath,modelSignature+'.csv')
if (os.path.exists(csvfile)):
data.to_csv(csvfile, mode='a', header=False,index=False)
else:
data.to_csv(csvfile, header=True,index=False)
modelTimeFrame = jsonData['timeFrame'][modelSignature]
currentseconds = time.time()
print(currentseconds - modelDetails[modelSignature]['startTime'])
if (currentseconds - modelDetails[modelSignature]['startTime']) >= float(modelTimeFrame):
csv_path = os.path.join(modelpath,modelSignature+'.csv')
#predictedData = pd.read_csv(csv_path)
##predictedData = predictedData.to_json(orient="records")
index = Process(modelSignature,jsonData,csv_path,modelpath)
index.start()
modelDetails[modelSignature]['startTime'] = secondsSinceEpoch
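# Illustrative producer sketch (not part of the consumer above): a minimal,
# hedged example of publishing a record in the shape this consumer expects,
# i.e. a JSON object with 'data', 'usecasename' and 'version' keys. The
# broker address, topic and payload are placeholder assumptions; the real
# values come from config/kafkaConfig.conf (kafkaCluster ip, port and topic).
def _example_publish_record():
    import json
    from kafka import KafkaProducer
    producer = KafkaProducer(
        bootstrap_servers=['localhost:9092'],
        value_serializer=lambda v: json.dumps(v).encode('utf-8'))
    message = {
        'usecasename': 'demo_usecase',
        'version': 1,
        'data': [{'feature1': 10.5, 'feature2': 'A'}]
    }
    producer.send('aion_topic', value=message)
    producer.flush()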
<s> import os
import shutil
import sys
import subprocess
from os.path import expanduser
import platform
import json
def createDockerImage(model_name,model_version,module,folderpath):
command = 'docker pull python:3.8-slim-buster'
os.system(command);
subprocess.check_call(["docker", "build", "-t",module+'_'+model_name.lower()+":"+model_version,"."], cwd=folderpath)
def local_docker_build(config):
print(config)
config = json.loads(config)
model_name = config['usecase']
model_version = config['version']
mlaac__code_path = config['mlacPath']
docker_images = {}
docker_images['ModelMonitoring'] = 'modelmonitoring'+'_'+model_name.lower()+':'+model_version
dataset_addr = os.path.join(mlaac__code_path,'ModelMonitoring')
createDockerImage(model_name,model_version,'modelmonitoring',dataset_addr)
docker_images['DataIngestion'] = 'dataingestion'+'_'+model_name.lower()+':'+model_version
dataset_addr = os.path.join(mlaac__code_path,'DataIngestion')
createDockerImage(model_name,model_version,'dataingestion',dataset_addr)
transformer_addr = os.path.join(mlaac__code_path,'DataTransformation')
docker_images['DataTransformation'] = 'datatransformation'+'_'+model_name.lower()+':'+model_version
createDockerImage(model_name,model_version,'datatransformation',transformer_addr)
featureengineering_addr = os.path.join(mlaac__code_path,'FeatureEngineering')
docker_images['FeatureEngineering'] = 'featureengineering'+'_'+model_name.lower()+':'+model_version
createDockerImage(model_name,model_version,'featureengineering',featureengineering_addr)
from os import listdir
arr = [filename for filename in os.listdir(mlaac__code_path) if filename.startswith("ModelTraining")]
docker_training_images = []
for x in arr:
dockertraing={}
dockertraing['Training'] = str(x).lower()+'_'+model_name.lower()+':'+model_version
docker_training_images.append(dockertraing)
training_addri = os.path.join(mlaac__code_path,x)
createDockerImage(model_name,model_version,str(x).lower(),training_addri)
docker_images['ModelTraining'] = docker_training_images
docker_images['ModelRegistry'] = 'modelregistry'+'_'+model_name.lower()+':'+model_version
deploy_addr = os.path.join(mlaac__code_path,'ModelRegistry')
createDockerImage(model_name,model_version,'modelregistry',deploy_addr)
docker_images['ModelServing'] = 'modelserving'+'_'+model_name.lower()+':'+model_version
deploy_addr = os.path.join(mlaac__code_path,'ModelServing')
createDockerImage(model_name,model_version,'modelserving',deploy_addr)
outputjsonFile = os.path.join(mlaac__code_path,'dockerlist.json')
with open(outputjsonFile, 'w') as f:
json.dump(docker_images, f)
f.close()
output = {'Status':'Success','Msg':outputjsonFile}
output = json.dumps(output)
print("aion_build_container:",output)<s> import docker
import json
import logging
def read_json(file_path):
data = None
with open(file_path,'r') as f:
data = json.load(f)
return data
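# Illustrative sketch of the docker image list this pipeline reads (the
# dockerlist.json file written by the container build step). The image tags
# below are placeholders; real entries follow the '<stage>_<usecase>:<version>'
# naming used when the images are built.
_EXAMPLE_DOCKERLIST = {
    'ModelMonitoring': 'modelmonitoring_usecase1:1',
    'DataIngestion': 'dataingestion_usecase1:1',
    'DataTransformation': 'datatransformation_usecase1:1',
    'FeatureEngineering': 'featureengineering_usecase1:1',
    'ModelTraining': [{'Training': 'modeltraining_lr_usecase1:1'}],
    'ModelRegistry': 'modelregistry_usecase1:1',
    'ModelServing': 'modelserving_usecase1:1'
}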
def run_pipeline(inputconfig):
inputconfig = json.loads(inputconfig)
logfilepath = inputconfig['logfilepath']
logging.basicConfig(level=logging.INFO,filename =logfilepath)
usecasename = inputconfig['usecase']
logging.info("UseCaseName :"+str(usecasename))
version = inputconfig['version']
logging.info("version :"+str(version))
config = inputconfig['dockerlist']
persistancevolume = inputconfig['persistancevolume']
logging.info("PersistanceVolume :"+str(persistancevolume))
datasetpath = inputconfig['datasetpath']
logging.info("DataSet Path :"+str(datasetpath))
config = read_json(config)
client = docker.from_env()
inputconfig = {'modelName':usecasename,'modelVersion':str(version),'dataLocation':datasetpath}
inputconfig = json.dumps(inputconfig)
inputconfig = inputconfig.replace('"', '\\\\"')
logging.info("===== Model Monitoring Container Start =====")
outputStr = client.containers.run(config['ModelMonitoring'],'python code.py -i'+datasetpath,volumes=[persistancevolume+':/aion'])
outputStr = outputStr.decode('utf-8')
logging.info('ModelMonitoring: '+str(outputStr))
print('ModelMonitoring: '+str(outputStr))
logging.info("===== ModelMonitoring Stop =====")
logging.info("===== Data Ingestion Container Start =====")
outputStr = client.containers.run(config['DataIngestion'],'python code.py',volumes=[persistancevolume+':/aion'])
outputStr = outputStr.decode('utf-8')
logging.info('DataIngestion: '+str(outputStr))
print('DataIngestion: '+str(outputStr))
logging.info("===== Data Ingestion Container Stop =====")
outputStr = outputStr.strip()
decoded_data = json.loads(outputStr)
status = decoded_data['Status']
if status != 'Success':
output = {'Status':'Error','Msg':'Data Ingestion Fails'}
logging.info("===== Transformation Container Start =====")
outputStr = client.containers.run(config['DataTransformation'],'python code.py',volumes=[persistancevolume+':/aion'])
outputStr = outputStr.decode('utf-8')
logging.info('Data Transformations: '+str(outputStr))
print('Data Transformations: '+str(outputStr))
logging.info("===== Transformation Container Done =====")
outputStr = outputStr.strip()
decoded_data = json.loads(outputStr)
status = decoded_data['Status']
if status != 'Success':
output = {'Status':'Error','Msg':'Data Transformations Fails'}
logging.info("===== Feature Engineering Container Start =====")
outputStr = client.containers.run(config['FeatureEngineering'],'python code.py',volumes=[persistancevolume+':/aion'])
outputStr = outputStr.decode('utf-8')
logging.info('FeatureEngineering: '+str(outputStr))
print('FeatureEngineering: '+str(outputStr))
logging.info("===== Feature Engineering Container Done =====")
outputStr = outputStr.strip()
decoded_data = json.loads(outputStr)
status = decoded_data['Status']
modeltraining = config['ModelTraining']
for mt in modeltraining:
logging.info("===== Training Container Start =====")
outputStr = client.containers.run(mt['Training'],'python code.py',volumes=[persistancevolume+':/aion'])
outputStr = outputStr.decode('utf-8')
logging.info('ModelTraining: '+str(outputStr))
print('ModelTraining: '+str(outputStr))
logging.info("===== Training Container Done =====")
outputStr = outputStr.strip()
try:
decoded_data = json.loads(outputStr)
status = decoded_data['Status']
except Exception as inst:
logging.info(inst)
logging.info("===== Model Registry Start =====")
outputStr = client.containers.run(config['ModelRegistry'],'python code.py',volumes=[persistancevolume+':/aion'])
outputStr = outputStr.decode('utf-8')
logging.info('ModelRegistry: '+str(outputStr))
print('ModelRegistry: '+str(outputStr))
logging.info("===== ModelRegistry Done =====")
logging.info("===== ModelServing Start =====")
outputStr = client.containers.run(config['ModelServing'],'python code.py',volumes=[persistancevolume+':/aion'])
outputStr = outputStr.decode('utf-8')
logging.info('Prediction: '+str(outputStr))
print('Prediction: '+str(outputStr))
logging.info("===== ModelServing Done =====") <s> import os
import sys
import json
from pathlib import Path
import subprocess
import shutil
import argparse
def create_and_save_yaml(git_storage_path, container_label,usecasepath):
file_name_prefix = 'gh-acr-'
yaml_file = f"""\\
name: gh-acr-{container_label}
on:
push:
branches: main
paths: {container_label}/**
workflow_dispatch:
jobs:
gh-acr-build-push:
runs-on: ubuntu-latest
steps:
- name: 'checkout action'
uses: actions/checkout@main
- name: 'azure login'
uses: azure/login@v1
with:
creds: ${{{{ secrets.AZURE_CREDENTIALS }}}}
- name: 'build and push image'
uses: azure/docker-login@v1
with:
login-server: ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}
username: ${{{{ secrets.REGISTRY_USERNAME }}}}
password: ${{{{ secrets.REGISTRY_PASSWORD }}}}
- run: |
docker build ./{container_label}/ModelMonitoring -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelmonitoring:{container_label}
docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelmonitoring:{container_label}
docker build ./{container_label}/DataIngestion -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/dataingestion:{container_label}
docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/dataingestion:{container_label}
docker build ./{container_label}/DataTransformation -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/datatransformation:{container_label}
docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/datatransformation:{container_label}
docker build ./{container_label}/FeatureEngineering -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/featureengineering:{container_label}
docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/featureengineering:{container_label}
docker build ./{container_label}/ModelRegistry -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelregistry:{container_label}
docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelregistry:{container_label}
docker build ./{container_label}/ModelServing -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelserving:{container_label}
docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelserving:{container_label}
"""
arr = [filename for filename in os.listdir(usecasepath) if filename.startswith("ModelTraining")]
for x in arr:
yaml_file+=' docker build ./'+container_label+'/'+x+' -t ${{ secrets.REGISTRY_LOGIN_SERVER }}/'+x.lower()+':'+container_label
yaml_file+='\\n'
yaml_file+=' docker push ${{ secrets.REGISTRY_LOGIN_SERVER }}/'+x.lower()+':'+container_label
yaml_file+='\\n'
with open(Path(git_storage_path)/(file_name_prefix + container_label + '.yaml'), 'w') as f:
f.write(yaml_file)
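# Illustrative sketch: create_and_save_yaml() above writes a GitHub Actions
# workflow named 'gh-acr-<container_label>.yaml' into the given workflows
# folder. The label and paths below are placeholder assumptions; usecasepath
# must point at an existing generated MLaC code folder.
def _example_generate_workflow():
    workflows_dir = Path('mlac_repo') / '.github' / 'workflows'
    workflows_dir.mkdir(parents=True, exist_ok=True)
    create_and_save_yaml(workflows_dir, 'usecase1_1', 'mlac_repo/usecase1_1')
    return workflows_dir / 'gh-acr-usecase1_1.yaml'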
def run_cmd(cmd):
try:
subprocess.check_output(cmd, stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
if e.stderr:
if isinstance(e.stderr, bytes):
err_msg = e.stderr.decode(sys.getfilesystemencoding())
else:
err_msg = e.stderr
elif e.output:
if isinstance(e.output, bytes):
err_msg = e.output.decode(sys.getfilesystemencoding())
else:
err_msg = e.output
else:
err_msg = str(e)
return False, err_msg
return True, ""
def validate_config(config):
non_null_keys = ['url','username', 'token', 'location', 'gitFolderLocation', 'email', 'modelName']
missing_keys = [k for k in non_null_keys if k not in config.keys()]
if missing_keys:
raise ValueError(f"following fields are missing in config file: {missing_keys}")
for k,v in config.items():
if k in non_null_keys and not v:
raise ValueError(f"Please provide value for '{k}' in config file.")
def upload(config):
validate_config(config)
url_type = config.get('url_type','https')
if url_type == 'https':
https_str = "https://"
url = https_str + config['username'] + ":" + config['token'] + "@" + config['url'][len(https_str):]
else:
url = config['url']
model_location = Path(config['location'])
git_folder_location = Path(config['gitFolderLocation'])
git_folder_location.mkdir(parents=True, exist_ok=True)
(git_folder_location/'.github'/'workflows').mkdir(parents=True, exist_ok=True)
if not model_location.exists():
raise ValueError('Trained model data not found')
os.chdir(str(git_folder_location))
(git_folder_location/config['modelName']).mkdir(parents=True, exist_ok=True)
shutil.copytree(model_location, git_folder_location/config['modelName'], dirs_exist_ok=True)
create_and_save_yaml((git_folder_location/'.github'/'workflows'), config['modelName'],config['location'])
if (Path(git_folder_location)/'.git').exists():
first_upload = False
else:
first_upload = True
if first_upload:
cmd = ['git','init']
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
cmd = ['git','config','user.name',config['username']]
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
cmd = ['git','config','user.email',config['email']]
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
cmd = ['git','add', '-A']
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
cmd = ['git','commit','-m',f"commit {config['modelName']}"]
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
cmd = ['git','branch','-M','main']
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
if first_upload:
cmd = ['git','remote','add','origin', url]
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
cmd = ['git','push','-f','-u','origin', 'main']
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
else:
cmd = ['git','push']
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
return json.dumps({'Status':'SUCCESS'})
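# Illustrative config sketch for upload(); every value below is a placeholder, not a default:
# config = {
#     "url": "https://github.com/<org>/<repo>.git",
#     "url_type": "https",
#     "username": "<git-user>",
#     "token": "<personal-access-token>",
#     "email": "<git-user@example.com>",
#     "location": "<trained-model-directory>",
#     "gitFolderLocation": "<local-working-copy>",
#     "modelName": "<usecase-label>"
# }
# print(upload(config))  # copies the model folder, generates the workflow and pushes to 'main'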
if __name__ == '__main__':
try:
if shutil.which('git') is None:
raise ValueError("git is not installed on this system")
parser = argparse.ArgumentParser()
        parser.add_argument('-c', '--config', help='Config file location, or the config JSON passed as a string')
args = parser.parse_args()
if Path(args.config).is_file() and Path(args.config).suffix == '.json':
with open(args.config,'r') as f:
config = json.load(f)
else:
config = json.loads(args.config)
print(upload(config))
except Exception as e:
status = {'Status':'Failure','msg':str(e)}
print(json.dumps(status))<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import logging
logging.getLogger('tensorflow').disabled = True
import json
#from nltk.corpus import stopwords
from collections import Counter
from numpy import mean
from numpy import std
from pandas import read_csv
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from learner.machinelearning import machinelearning
# from sklearn.dummy import DummyClassifier
# create histograms of numeric input variables
import sys
import os
import re
import pandas as pd
import numpy as np
from learner.aion_matrix import aion_matrix
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import autokeras as ak
# load the sonar dataset
from sklearn.model_selection import train_test_split
# from sklearn.metrics import cohen_kappa_score
# from sklearn.metrics import roc_auc_score
# from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve
from math import sqrt
from sklearn.metrics import mean_squared_error, explained_variance_score,mean_absolute_error
from sklearn import metrics
class aionNAS:
def __init__(self,nas_class,nas_params,xtrain1,xtest1,ytrain1,ytest1,deployLocation):
try:
self.dfFeatures=None
self.nas_class=nas_class
self.nas_params=nas_params
self.targetFeature=None
self.log = logging.getLogger('eion')
self.n_models=int(self.nas_params['n_models'])
self.n_epochs=int(self.nas_params['n_epochs'])
self.optimizer=self.nas_params['optimizer']
self.metrics=self.nas_params['metrics']
self.tuner=self.nas_params['tuner']
self.seed=int(self.nas_params['seed'])
self.xtrain = xtrain1
self.xtest = xtest1
self.ytrain = ytrain1
self.ytest = ytest1
#self.labelMaps = labelMaps
self.deployLocation=deployLocation
except Exception as e:
self.log.info('<!------------- NAS INIT Error ---------------> ')
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def paramCheck(self):
try:
if not (self.nas_class):
self.log.info('<!------------- NAS class input Error ---------------> ')
if not (self.nas_params):
self.log.info('<!------------- NAS model hyperparameter input Error ---------------> ')
if not (self.targetFeature):
self.log.info('<!------------- NAS model targetFeature input Error ---------------> ')
if (self.n_models < 1):
self.n_models=1
if not (self.dfFeatures):
self.log.info('<!------------- NAS model features Error ---------------> ')
if (self.n_epochs < 1):
                self.n_epochs=1
if not (self.optimizer):
self.optimizer="adam"
if not (self.tuner):
self.tuner="greedy"
if (self.seed < 1):
self.seed=0
if not (self.metrics):
self.metrics=None
except ValueError:
self.log.info('<------------------ NAS config file error. --------------->')
def recall_m(self,y_true, y_pred):
        true_positives = tf.keras.backend.sum(tf.keras.backend.round(tf.keras.backend.clip(y_true * y_pred, 0, 1)))
        possible_positives = tf.keras.backend.sum(tf.keras.backend.round(tf.keras.backend.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + tf.keras.backend.epsilon())
return recall
def precision_m(self,y_true, y_pred):
        true_positives = tf.keras.backend.sum(tf.keras.backend.round(tf.keras.backend.clip(y_true * y_pred, 0, 1)))
        predicted_positives = tf.keras.backend.sum(tf.keras.backend.round(tf.keras.backend.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + tf.keras.backend.epsilon())
return precision
def f1_score(self,y_true, y_pred):
precision = self.precision_m(y_true, y_pred)
recall = self.recall_m(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+tf.keras.backend.epsilon()))
def nasStructdataPreprocess(self):
df=self.data
self.paramCheck()
target=df[self.targetFeature].values
counter = Counter(target)
for k,v in counter.items():
per = v / len(target) * 100
self.log.info('autokeras struct Class=%d, Count=%d, Percentage=%.3f%%' % (k, v, per))
# select columns with numerical data types
num_ix = df.select_dtypes(include=['int64', 'float64']).columns
subset = df[num_ix]
last_ix = len(df.columns) - 1
y=df[self.targetFeature]
X = df.drop(self.targetFeature, axis=1)
#Using Pearson Correlation
# plt.figure(figsize=(12,10))
# cor = df.corr()
# sns.heatmap(cor, annot=True, cmap=plt.cm.Reds)
# plt.show()
# select categorical features
cat_ix = X.select_dtypes(include=['object', 'bool']).columns
# one hot encode cat features only
ct = ColumnTransformer([('o',OneHotEncoder(),cat_ix)], remainder='passthrough')
X = X.reset_index()
X=X.replace(to_replace="NULL",value=0)
X = X.dropna(how='any',axis=0)
X = ct.fit_transform(X)
from sklearn.preprocessing import scale
X = scale(X)
# label encode the target variable to have the classes 0 and 1
y = LabelEncoder().fit_transform(y)
# separate into train and test sets
X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=self.test_size,random_state=1)
return X_train, X_test, y_train, y_test
def nasStructClassification(self,scoreParam):
try:
objClf = aion_matrix()
X_train, X_test, y_train, y_test= self.xtrain, self.xtest, self.ytrain, self.ytest
modelName="nas_structdata_classifier"
self.log.info("Processing structured data block...\\n")
s_in = ak.StructuredDataInput()
#s_in = Flatten()(s_in)
s_out = ak.StructuredDataBlock(categorical_encoding=True)(s_in)
self.log.info("Data pipe via autokeras Classification Dense layers ...\\n")
s_out = ak.ClassificationHead()(s_out)
self.log.info("applying autokeras automodel to run different neural models...\\n")
try:
tuner = str(self.tuner).lower()
except UnicodeEncodeError:
tuner = (self.tuner.encode('utf8')).lower()
nasclf = ak.AutoModel(
inputs=s_in,
outputs=s_out,
overwrite=True,
tuner=tuner,
max_trials=self.n_models,
seed=self.seed)
# compile the model
#nasclf.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc',self.f1_score,self.precision_m, self.recall_m])
nasclf.fit(X_train, y_train, epochs=self.n_epochs)
best_model = nasclf.export_model()
mpredict=best_model.predict(X_test)
mtpredict=best_model.predict(X_train)
#loss, accuracy, f1_score, precision, recall = nasclf.evaluate(X_test, y_test, verbose=0)
#from sklearn.metrics import classification_report
#Classification report
y_pred_bool = np.argmax(mpredict, axis=1)
y_train_pred_bool = np.argmax(mtpredict, axis=1)
score = objClf.get_score(scoreParam,y_test, y_pred_bool)
#best_model = nasclf.export_model()
best_model_summary=best_model.summary()
filename = os.path.join(self.deployLocation,'log','summary.txt')
with open(filename,'w') as f:
best_model.summary(print_fn=lambda x: f.write(x + '\\n'))
f.close()
#self.log.info("==========")
#self.log.info(best_model_summary)
self.log.info("NAS struct data classification, best model summary: \\n"+str(best_model.summary(print_fn=self.log.info)))
#self.log.info("==========")
#Save and load model
# # #try:
# try:
# best_model.save("model_class_autokeras", save_format="tf")
# except Exception:
# best_model.save("model_class_autokeras.h5")
# loaded_model = load_model("model_class_autokeras", custom_objects=ak.CUSTOM_OBJECTS)
# loadedmodel_predict=loaded_model.predict(X_test)
loss,accuracy_m=nasclf.evaluate(X_test, y_test)
#mpredict_classes = mpredict.argmax(axis=-1)
#accuracy = accuracy_score(y_test.astype(int), mpredict.astype(int))
# precision tp / (tp + fp)
#precision = precision_score(y_test.astype(int), mpredict.astype(int),average='macro')
# recall: tp / (tp + fn)
#recall = recall_score(y_test.astype(int), mpredict.astype(int),average='macro')
#f1score=f1_score(y_test.astype(int), mpredict.astype(int) , average="macro")
self.log.info("Autokeras struct data classification metrics: \\n")
except Exception as inst:
self.log.info("Error: NAS failed "+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
print(inst)
return modelName,nasclf,score
def nasStructRegressor(self,scoreParam):
objClf = aion_matrix()
modelName="nas_struct_regressor"
#self.paramCheck()
X_train, X_test, y_train, y_test= self.xtrain, self.xtest, self.ytrain, self.ytest
# Autokeras alg
s_in = ak.StructuredDataInput()
#tf.keras.layers.GlobalMaxPooling2D()(s_in)
s_out = ak.StructuredDataBlock(categorical_encoding=True)(s_in)
self.log.info("Data pipe via autokeras Regression Dense layers ...\\n")
s_out = ak.RegressionHead(loss='mse', metrics=['mae'])(s_out)
self.log.info("applying autokeras automodel to evaluate different neural models...\\n")
try:
tuner = str(self.tuner).lower()
except UnicodeEncodeError:
tuner = (self.tuner.encode('utf8')).lower()
nas_reg = ak.AutoModel(
inputs=s_in,
outputs=s_out,
overwrite=True,
tuner=tuner,
max_trials=self.n_models)
nas_reg.fit(X_train, y_train, epochs=self.n_epochs)
best_model = nas_reg.export_model()
self.log.info("NAS struct data regression best model summary: \\n")
best_model_summary=best_model.summary(print_fn=self.log.info)
self.log.info(best_model_summary)
predictm=best_model.predict(X_test)
mtpredict=best_model.predict(X_train)
score = objClf.get_score(scoreParam,y_test, predictm)
self.log.info("Autokeras struct data regression metrics: \\n")
return modelName,nas_reg,score
def nasMain(self,scoreParam):
modelName = ""
nasclf=None
nas_reg=None
#text_reg_model=None
mse_value=0
reg_rmse=0
mape_reg=0
huber_loss_reg=0
accuracy=0
precision=0
recall=0
#Dummy values to return main for classification problems
dummy_score_1=int(0)
#dummy_score_2=int(0)
try:
if ((self.nas_class.lower() == "classification")):
modelName,nasclf,score=self.nasStructClassification(scoreParam)
self.log.info('NAS Struct Classification score: '+str(score))
best_model_nas = nasclf.export_model()
scoredetails = '{"Model":"NAS","Score":'+str(round(score,2))+'}'
return best_model_nas,self.nas_params,round(score,2),'NAS',-1,-1,-1
elif (self.nas_class.lower() == "regression"):
modelName,nas_reg,score =self.nasStructRegressor(scoreParam)
self.log.info('NAS Struct Regression score: '+str(score))
best_model_nas = nas_reg.export_model()
'''
filename = os.path.join(self.deployLocation,'model','autoKerasModel')
best_model_nas = nas_reg.export_model()
try:
best_model_nas.save(filename, save_format="tf")
modelName = 'autoKerasModel'
except Exception:
filename = os.path.join(self.deployLocation,'model','autoKerasModel.h5')
best_model_nas.save(filename)
modelName = 'autoKerasModel.h5'
'''
scoredetails = '{"Model":"NAS","Score":'+str(round(score,2))+'}'
'''
error_matrix = '"MSE":"'+str(round(mse_value,2))+'","RMSE":"'+str(round(reg_rmse,2))+'","MAPE":"'+str(round(mape_reg,2))+'","MSLE":"'+str(round(msle_reg,2))+'"'
'''
return best_model_nas,self.nas_params,score,'NAS'
else:
pass
except Exception as inst:
print(inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
output = {"status":"FA |
IL","message":str(inst).strip('"')}
output = json.dumps(output)
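# Illustrative usage sketch for aionNAS (hyperparameter values and the score parameter
# 'Accuracy' are assumptions, not defaults shipped with the module):
# nas_params = {'n_models': 2, 'n_epochs': 5, 'optimizer': 'adam',
#               'metrics': 'accuracy', 'tuner': 'greedy', 'seed': 42}
# nas = aionNAS('classification', nas_params, X_train, X_test, y_train, y_test, deploy_dir)
# best_model, params, score, algo_name, *_ = nas.nasMain('Accuracy')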
<s> import itertools
import logging
from typing import Optional, Dict, Union
from nltk import sent_tokenize
import torch
from transformers import(
AutoModelForSeq2SeqLM,
AutoTokenizer,
PreTrainedModel,
PreTrainedTokenizer,
)
logger = logging.getLogger(__name__)
class QGPipeline:
"""Poor man's QG pipeline"""
def __init__(
self,
model: PreTrainedModel,
tokenizer: PreTrainedTokenizer,
ans_model: PreTrainedModel,
ans_tokenizer: PreTrainedTokenizer,
qg_format: str,
use_cuda: bool
):
self.model = model
self.tokenizer = tokenizer
self.ans_model = ans_model
self.ans_tokenizer = ans_tokenizer
self.qg_format = qg_format
self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
self.model.to(self.device)
if self.ans_model is not self.model:
self.ans_model.to(self.device)
assert self.model.__class__.__name__ in ["T5ForConditionalGeneration", "BartForConditionalGeneration"]
if "T5ForConditionalGeneration" in self.model.__class__.__name__:
self.model_type = "t5"
else:
self.model_type = "bart"
def __call__(self, inputs: str):
inputs = " ".join(inputs.split())
sents, answers = self._extract_answers(inputs)
flat_answers = list(itertools.chain(*answers))
if len(flat_answers) == 0:
return []
if self.qg_format == "prepend":
qg_examples = self._prepare_inputs_for_qg_from_answers_prepend(inputs, answers)
else:
qg_examples = self._prepare_inputs_for_qg_from_answers_hl(sents, answers)
qg_inputs = [example['source_text'] for example in qg_examples]
questions = self._generate_questions(qg_inputs)
output = [{'answer': example['answer'], 'question': que} for example, que in zip(qg_examples, questions)]
return output
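    # Output shape sketch (illustrative): calling the pipeline on a passage returns a list of
    # dicts such as [{'answer': '<extracted span>', 'question': '<generated question>'}];
    # an empty list means no answer spans were extracted from the input.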
def _generate_questions(self, inputs):
inputs = self._tokenize(inputs, padding=True, truncation=True)
outs = self.model.generate(
input_ids=inputs['input_ids'].to(self.device),
attention_mask=inputs['attention_mask'].to(self.device),
max_length=32,
num_beams=4,
)
questions = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in outs]
return questions
def _extract_answers(self, context):
sents, inputs = self._prepare_inputs_for_ans_extraction(context)
inputs = self._tokenize(inputs, padding=True, truncation=True)
outs = self.ans_model.generate(
input_ids=inputs['input_ids'].to(self.device),
attention_mask=inputs['attention_mask'].to(self.device),
max_length=32,
)
dec = [self.ans_tokenizer.decode(ids, skip_special_tokens=False) for ids in outs]
answers = [item.split('<sep>') for item in dec]
answers = [i[:-1] for i in answers]
return sents, answers
def _tokenize(self,
inputs,
padding=True,
truncation=True,
add_special_tokens=True,
max_length=512
):
inputs = self.tokenizer.batch_encode_plus(
inputs,
max_length=max_length,
add_special_tokens=add_special_tokens,
truncation=truncation,
padding="max_length" if padding else False,
pad_to_max_length=padding,
return_tensors="pt"
)
return inputs
def _prepare_inputs_for_ans_extraction(self, text):
sents = sent_tokenize(text)
inputs = []
for i in range(len(sents)):
source_text = "extract answers:"
for j, sent in enumerate(sents):
if i == j:
sent = "<hl> %s <hl>" % sent
source_text = "%s %s" % (source_text, sent)
source_text = source_text.strip()
if self.model_type == "t5":
source_text = source_text + " </s> "
inputs.append(source_text)
return sents, inputs
def _prepare_inputs_for_qg_from_answers_hl(self, sents, answers):
inputs = []
for i, answer in enumerate(answers):
if len(answer) == 0: continue
for answer_text in answer:
sent = sents[i]
sents_copy = sents[:]
answer_text = answer_text.strip()
ans_start_idx = 0
# ans_start_idx = sent.index(answer_text)
# if answer_text in sent:
# ans_start_idx = sent.index(answer_text)
# else:
# continue
sent = f"{sent[:ans_start_idx]} <hl> {answer_text} <hl> {sent[ans_start_idx + len(answer_text): ]}"
sents_copy[i] = sent
source_text = " ".join(sents_copy)
source_text = f"generate question: {source_text}"
if self.model_type == "t5":
source_text = source_text + " </s> "
inputs.append({"answer": answer_text, "source_text": source_text})
return inputs
def _prepare_inputs_for_qg_from_answers_prepend(self, context, answers):
flat_answers = list(itertools.chain(*answers))
examples = []
for answer in flat_answers:
source_text = f"answer: {answer} context: {context}"
if self.model_type == "t5":
source_text = source_text + " </s> "
examples.append({"answer": answer, "source_text": source_text})
return examples
class MultiTaskQAQGPipeline(QGPipeline):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def __call__(self, inputs: Union[Dict, str]):
if type(inputs) is str:
# do qg
return super().__call__(inputs)
else:
# do qa
return self._extract_answer(inputs["question"], inputs["context"])
def _prepare_inputs_for_qa(self, question, context):
source_text = f"question: {question} context: {context}"
if self.model_type == "t5":
source_text = source_text + " </s> "
return source_text
def _extract_answer(self, question, context):
source_text = self._prepare_inputs_for_qa(question, context)
inputs = self._tokenize([source_text], padding=False)
outs = self.model.generate(
input_ids=inputs['input_ids'].to(self.device),
attention_mask=inputs['attention_mask'].to(self.device),
max_length=16,
)
answer = self.tokenizer.decode(outs[0], skip_special_tokens=True)
return answer
class E2EQGPipeline:
def __init__(
self,
model: PreTrainedModel,
tokenizer: PreTrainedTokenizer,
use_cuda: bool
) :
self.model = model
self.tokenizer = tokenizer
self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
self.model.to(self.device)
assert self.model.__class__.__name__ in ["T5ForConditionalGeneration", "BartForConditionalGeneration"]
if "T5ForConditionalGeneration" in self.model.__class__.__name__:
self.model_type = "t5"
else:
self.model_type = "bart"
self.default_generate_kwargs = {
"max_length": 256,
"num_beams": 4,
"length_penalty": 1.5,
"no_repeat_ngram_size": 3,
"early_stopping": True,
}
def __call__(self, context: str, **generate_kwargs):
inputs = self._prepare_inputs_for_e2e_qg(context)
        # TODO: when overriding default_generate_kwargs all other arguments need to be passed
# find a better way to do this
if not generate_kwargs:
generate_kwargs = self.default_generate_kwargs
input_length = inputs["input_ids"].shape[-1]
# max_length = generate_kwargs.get("max_length", 256)
# if input_length < max_length:
# logger.warning(
# "Your max_length is set to {}, but you input_length is only {}. You might consider decreasing max_length manually, e.g. summarizer('...', max_length=50)".format(
# max_length, input_length
# )
# )
outs = self.model.generate(
input_ids=inputs['input_ids'].to(self.device),
attention_mask=inputs['attention_mask'].to(self.device),
**generate_kwargs
)
prediction = self.tokenizer.decode(outs[0], skip_special_tokens=True)
questions = prediction.split("<sep>")
questions = [question.strip() for question in questions[:-1]]
return questions
def _prepare_inputs_for_e2e_qg(self, context):
source_text = f"generate questions: {context}"
if self.model_type == "t5":
source_text = source_text + " </s> "
inputs = self._tokenize([source_text], padding=False)
return inputs
def _tokenize(
self,
inputs,
padding=True,
truncation=True,
add_special_tokens=True,
max_length=512
):
inputs = self.tokenizer.batch_encode_plus(
inputs,
max_length=max_length,
add_special_tokens=add_special_tokens,
truncation=truncation,
padding="max_length" if padding else False,
pad_to_max_length=padding,
return_tensors="pt"
)
return inputs
SUPPORTED_TASKS = {
"question-generation": {
"impl": QGPipeline,
"default": {
"model": "valhalla/t5-small-qg-hl",
"ans_model": "valhalla/t5-small-qa-qg-hl",
}
},
"multitask-qa-qg": {
"impl": MultiTaskQAQGPipeline,
"default": {
"model": "valhalla/t5-small-qa-qg-hl",
}
},
"e2e-qg": {
"impl": E2EQGPipeline,
"default": {
"model": "valhalla/t5-small-e2e-qg",
}
}
}
def pipeline(
task: str,
model: Optional = None,
tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
qg_format: Optional[str] = "highlight",
ans_model: Optional = None,
ans_tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
use_cuda: Optional[bool] = True,
**kwargs,
):
# Retrieve the task
if task not in SUPPORTED_TASKS:
raise KeyError("Unknown task {}, available tasks are {}".format(task, list(SUPPORTED_TASKS.keys())))
targeted_task = SUPPORTED_TASKS[task]
task_class = targeted_task["impl"]
# Use default model/config/tokenizer for the task if no model is provided
if model is None:
model = targeted_task["default"]["model"]
# Try to infer tokenizer from model or config name (if provided as str)
if tokenizer is None:
if isinstance(model, str):
tokenizer = model
else:
            # Impossible to guess which tokenizer to use here
raise Exception(
"Impossible to guess which tokenizer to use. "
"Please provided a PretrainedTokenizer class or a path/identifier to a pretrained tokenizer."
)
# Instantiate tokenizer if needed
if isinstance(tokenizer, (str, tuple)):
if isinstance(tokenizer, tuple):
# For tuple we have (tokenizer name, {kwargs})
tokenizer = AutoTokenizer.from_pretrained(tokenizer[0], **tokenizer[1])
else:
tokenizer = AutoTokenizer.from_pretrained(tokenizer)
# Instantiate model if needed
if isinstance(model, str):
model = AutoModelForSeq2SeqLM.from_pretrained(model)
if task == "question-generation":
if ans_model is None:
# load default ans model
ans_model = targeted_task["default"]["ans_model"]
ans_tokenizer = AutoTokenizer.from_pretrained(ans_model)
ans_model = AutoModelForSeq2SeqLM.from_pretrained(ans_model)
else:
# Try to infer tokenizer from model or config name (if provided as str)
if ans_tokenizer is None:
if isinstance(ans_model, str):
ans_tokenizer = ans_model
else:
                    # Impossible to guess which tokenizer to use here
raise Exception(
"Impossible to guess which tokenizer to use. "
"Please provided a PretrainedTokenizer class or a path/identifier to a pretrained tokenizer."
)
# Instantiate tokenizer if needed
if isinstance(ans_tokenizer, (str, tuple)):
if isinstance(ans_tokenizer, tuple):
# For tuple we have (tokenizer name, {kwargs})
ans_tokenizer = AutoTokenizer.from_pretrained(ans_tokenizer[0], **ans_tokenizer[1])
else:
ans_tokenizer = AutoTokenizer.from_pretrained(ans_tokenizer)
if isinstance(ans_model, str):
ans_model = AutoModelForSeq2SeqLM.from_pretrained(ans_model)
if task == "e2e-qg":
return task_class(model=model, tokenizer=tokenizer, use_cuda=use_cuda)
elif task == "question-generation":
        return task_class(model=model, tokenizer=tokenizer, ans_model=ans_model, ans_tokenizer=ans_tokenizer, qg_format=qg_format, use_cuda=use_cuda)
else:
return task_class(model=model, tokenizer=tokenizer, ans_model=model, ans_tokenizer=tokenizer, qg_format=qg_format, use_cuda=use_cuda)
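# Illustrative usage sketch (the passage text is an assumption; the model names come from
# the SUPPORTED_TASKS defaults defined above):
# nlp = pipeline("question-generation")
# print(nlp("Python is a programming language. It was created by Guido van Rossum."))
# e2e = pipeline("e2e-qg")
# print(e2e("Python is a programming language. It was created by Guido van Rossum."))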
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import socket
import os
import rsa
from os.path import expanduser
from pathlib import Path
import requests
import platform
from appbe.dataPath import DATA_DIR
import socket
import getmac
import subprocess
import sys
import json
from datetime import datetime
import binascii
computername = socket.getfqdn()
global_key = '''
-----BEGIN RSA PUBLIC KEY-----
MIIBCgKCAQEAzJcxqRiUpp7CzViyqNlYaeyceDh5y6Ib4SoxoyNkN3+k0q+cr1lb
k0KdWTtHIVqH1wsLYofYjpB7X2RN0KYTv8VfwmfQNrpFEbiRz4gcAeuxGCPgGaue
N1ttujQMWHWCcY+UH5Voh8YUfkW8P+T3zxvr1d30D+kVBJC59y/31JvTzr3Bw/T+
NYv6xiienYiEYtm9d5ATioEwZOXaQBrtVvRmqcod5A1h4kn1ZauLX2Ph8H4TAuit
NLtw6xUCJNumphP7xdU+ca6P6a6eaLprgKhvky+nz16u9/AC2AazRQHKWf8orS6b
fw16JDCRs0zU4mTQLCjkUUt0edOaRhUtcQIDAQAB
-----END RSA PUBLIC KEY-----
'''
quarter_key = '''
-----BEGIN RSA PUBLIC KEY-----
MIIBCgKCAQEAmKzOJxVEV9ulA+cjfxguAduLMD47OWjLcEAEmEuK8vR4O5f6e2h1
08NniGC+nkwqmM00U7JTVBkqnt9S/JgE3pAH2xwfWda2OvXNWisWmOQdqB0+XRHh
NXsIG3yRk/sMlDpe7MJIyM5ADSu01PLn9FZTfmMq7lEp32tAf71cuUE/dwuWSvEQ
WK2hn1L4D97O43XCd7FHtMSHfgtjdcCFgX9IRgWLKC8Bm3q5qcqF4v3cHuYTj3V9
njxPtRqPg6HJFiJrm9AX5bUEHAvbTcw4wAmsNTRQHPvVB+Lc+yGh5x8crhKjNB01
gdB5I3a4mPO7dKvadR6Mr28trr0Ff5t2HQIDAQAB
-----END RSA PUBLIC KEY-----
'''
halfYear_key='''
-----BEGIN RSA PUBLIC KEY-----
MIIBCgKCAQEAgrGNwl8CNYQmVxi8/GEgPjfL5aEmyPkDyaJb9h4hZDSZCeeKd7Rv
wwhuRTdBBfOp0bQ7QS7NYMg38Xlc3x85I9RnxdQdDKn2nRuvG0hG3wMBFy/DCSXF
tXbDjJkLijAhqcBNu8m+a2Gtn14ShC7TbcfY4iVXho3WFUrn0xq6S5ducqWCsLJh
R+TNImCaMICqfoAzEDGC3ojO5Hi3vJmmyK5CVp6bt4wLRATQjcp1ujGW4Uv4kEgp
7TR077c226v1KOdKdyZPHJzT1MKwZrG2Gdluk3/Y1apbwyGzYqFdTCOAB+mE73Dn
wFXURgDJQmaU2oxxaA13WRcELpnirm+aIwIDAQAB
-----END RSA PUBLIC KEY-----
'''
oneYear_key='''
-----BEGIN RSA PUBLIC KEY-----
MIIBCgKCAQEA3GLqn+vkKn3fTNH3Bbb3Lq60pCoe+mn0KPz74Bp7p5OkZAUe14pP
Tcf/UqdPwiENhSCseWtfZmfKDK8qYRHJ5xW02+AhHPPdiacS45X504/lGG3q/4SG
ZgaFhMDvX+IH/ZH+qqbU3dRQhXJCCrAVAa7MonzM6yPiVeS2SdpMkNg1VDR1oTLB
Pn+qSV6CnkK1cYtWCRQ23GH2Ru7fc09r7m8hVcifKJze84orpHC5FX0WScQuR8h/
fs1IbGkxTOxP8vplUj/cd4JjUxgd+w+8R4kcoPhdGZF5UGeZA8xMERzQLvh+4Ui0
KIvz5/iyKB/ozaeSG0OMwDAk3WDEnb1WqQIDAQAB
-----END RSA PUBLIC KEY-----
'''
full_key='''
-----BEGIN RSA PUBLIC KEY-----
MIIBCgKCAQEAhqfNMuYYLdVrePhkO9rU/qT6FgolzI0YyzIJ2OeJE+++JioYm6nn
ohQU32iiE0DZlCCLrHJXOOIAz2Op80goX0lxtngyxVUPsiB5CI77sAC7x6K3anJ0
elpnQCC0+xV2ZL5eIMNQHLe+X6wJl/HGWqkUlxKpWr4/kBEB4EisW60OePfhntIN
4OUJ7iEq+sDdOM5WazJIXeNV1cig4i6057GE3k5ITcQUmw17DZu2+dqkIscckaG+
t5SF7Qnvt4IY8IeQp2htx3yD+CJCV0u2uKwoSFMGJn3OWdaixC3+eojyMXmfAWtQ
Ee9NLNNaTCMIvQ8BeItJLQs2Htw3bZNMvwIDAQAB
-----END RSA PUBLIC KEY-----
'''
def validate_key_Pair(privatepath,publickey):
with open(privatepath, 'rb') as privatefile:
keydata = privatefile.read()
privatefile.close()
try:
privkey = rsa.PrivateKey.load_pkcs1(keydata,'PEM')
data = 'Validate Global License'
signature = rsa.sign(data.encode('utf-8'), privkey, 'SHA-1')
pubkey = rsa.PublicKey.load_pkcs1(publickey)
except:
return False
try:
rsa.verify(data.encode('utf-8'), signature, pubkey)
return True
except Exception as e:
return False
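# Minimal usage sketch (the path is an assumption): returns True only when the private key
# stored in the .lic file signs data that verifies against the supplied public key.
# is_valid = validate_key_Pair('/path/to/license.lic', full_key)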
def updateDRecord(licensepath):
domain_license_path = os.path.join(DATA_DIR,'License','license_domain.lic')
if(os.path.isfile(licensepath)):
with open(licensepath, 'rb') as f:
licensekey = f.read()
f.close()
with open(domain_license_path, 'wb') as f:
f.write(licensekey)
f.close()
if(validate_key_Pair(domain_license_path,global_key)):
return True,'Valid Domain License'
else:
return False,'Invalid Domain License'
else:
return False,'File Not Exists'
def generateLicenseKey(userKey):
record = {'UserKey':userKey}
record = json.dumps(record)
status = 'Error'
url = 'https://qw7e33htlk.execute-api.ap-south-1.amazonaws.com/default/aion_license'
try:
response = requests.post(url, data=record,headers={"x-api-key":"3cQKRkKA4S57pYrkFp1Dd9jRXt4xnFoB9iqhAQRM","Content-Type":"application/json",})
if response.status_code == 200:
outputStr=response.content
outputStr = outputStr.decode('utf-8','ignore')
outputStr = outputStr.strip()
license_dict = json.loads(str(outputStr))
if license_dict['status'] == 'success':
status = 'Success'
licenseKey = license_dict['msg']
else:
status = 'Error'
licenseKey = ''
else:
status = 'Error'
licenseKey = ''
except Exception as inst:
print(inst)
status = 'Error'
licenseKey = ''
msg = {'status':status,'key':userKey,'licenseKey':licenseKey,'link':''}
return msg
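# Returned shape sketch: {'status': 'Success' or 'Error', 'key': <userKey>,
# 'licenseKey': '<key or empty string>', 'link': ''}; the licenseKey content depends
# entirely on the response of the remote license service.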
def updateRecord(licensepath):
currentDirectory = os.path.dirname(os.path.abspath(__file__))
license_path = os.path.join(currentDirectory,'..','lic','license.lic')
if(os.path.isfile(licensepath)):
with open(licensepath, 'rb') as f:
licensekey = f.read()
f.close()
with open(license_path, 'wb') as f:
f.write(licensekey)
f.close()
status,msg = check_domain_license()
if status:
status,msg = getdaysfromstartdate()
if status:
status,msg = check_days_license(int(msg))
return status,msg
else:
return False,'File Not Exists'
def check_domain_license():
if 'CORP.HCL.IN' in computername:
return True,'HCL Domain'
else:
return True,'HCL Domain'
def diff_month(d1, d2):
return (d1.year - d2.year) * 12 + d1.month - d2.month
def getdaysfromstartdate():
currentDirectory = os.path.dirname(os.path.abspath(__file__))
startdatePath = os.path.join(currentDirectory,'..','lic','startdate.txt')
if(os.path.isfile(startdatePath)):
with open(startdatePath, "rb") as fl:
encrypted_message = fl.read()
fl.close()
privkey = '''-----BEGIN RSA PRIVATE KEY-----
MIIEqwIBAAKCAQEAm75ZwaepuxGJjU1Slk1+IUO2E49Hy8i9dym5FUaBRyTRH6R+
GTF1kcpd+1QinIZDMIdsmAc95Y8pTufxY30QxCkOhVASitSQWHS/IiWQHmsTJwdr
38lqZnQQloOt/iPlhcavbxu/yKFzwBmp+nM+ErDTnCBh6EGCGrw1xWF30T2IBpmp
WwMEoqZsFV69RzwQAw39KG1KCxi5uscrB62YPgUdlT2b4Yaa90egQhGLLVdnKvhP
ORiGT9omCH90Dkm1oMMQ0Y2JBLezgXa/bunSqtTBxEwzlwUAX2JJcanFYrzKy2OL
xzwNRlWUXilZ4R/1RHAgUdNyKbYxZqc24MApoQIDAQABAoIBAQCHZ/i7gNz10qqH
2qkqGlfF7gvYd6MRTwdDGlhbYgA17ZGP9EDaAIFabtpFEAJDmgvCnotQpkMvWcet
XcUmHW89TQDd8R8d6u9QqLggpQ3nFGsDbNViLMjAKLrfUb8tjOIZ7ANNE5ArjAuK
AgYhxJ48O9bPD+xvtLwip95PHxMMz1CF0vxrpCinvPdeC3HzcnLNZWN3ustbph/4
Tx8mrKDpAVIHVYVbY4CMtm7NbIBYdyR9Lokc4zBg/OTuLo+0QRVJ3GHAN6cGxTwY
vLwN9iBBHyn9WBp5NIOSoCdob7+ce8y+X8yHmVhwRCfcrYphzfFNfP7SPNzV1dLs
dFybn/h9AoGJALCOC7ss+PBXy5WrWVNRPzFO7KrJDl5q7s/gMk0PkB4i4XOKHDTl
MhHZXhxp84HwpphwNxPHvpFe3pVZwwoe8LH1neoodlLOF0Kuk3jENh6cMhKFvcZ+
gxaBxGSCOXF/U307mh0i4AafClhVjxtLgBW5iJSVA9Brc7ZqVwxlUP7aYGzReIE1
uEMCeQDh0vq8NteUlkM/wpNzrHHqgtEzePbTYa+QcTm4xhARHR/cO+E0/mZIfltw
3NVWCIalMia+aKnvRHqHy/cQfEo2Uv/h8oARWnbrvicMRTwYL0w2GrP0f+aG0RqQ
msLMzS3kp6szhM7C99reFxdlxJoWBKkp94psOksCgYkApB01zGRudkK17EcdvjPc
sMHzfoFryNpPaI23VChuR4UW2mZ797NAypSqRXE7OALxaOuOVuWqP8jW0C9i/Cps
hI+SnZHFAw2tU3+hd3Wz9NouNUd6c2MwCSDQ5LikGttHSTa49/JuGdmGLTxCzRVu
V0NiMPMfW4I2Sk8o4U3gbzWgwiYohLrhrwJ5ANun/7IB2lIykvk7B3g1nZzRYDIk
EFpuI3ppWA8NwOUUoj/zksycQ9tx5Pn0JCMKKgYXsS322ozc3B6o3AoSC5GpzDH4
UnAOwavvC0ZZNeoEX6ok8TP7EL3EOYW8s4zIa0KFgPac0Q0+T4tFhMG9qW+PWwhy
Oxeo3wKBiCQ8LEgmHnXZv3UZvwcikj6oCrPy8fnhp5RZl2DPPlaqf3vokE6W5oEo
LIKcWKvth3EU7HRKwYgaznj/Mw55aETx31R0FiXMG266B4V7QWPF/KuaR0GBsYfu
+edGXQCnLgooKlMtQLdL5mcLXHc9x/0Z0iYEejJtbjcGR87WylSNaCH3hH703iQ=
-----END RSA PRIVATE KEY-----
'''
privkey = rsa.PrivateKey.load_pkcs1(privkey,'PEM')
decrypted_message = rsa.decrypt(encrypted_message, privkey)
decrypted_message = decrypted_message.decode()
import datetime
start_time = datetime.datetime.strptime(decrypted_message, '%Y-%m-%d')
current_date = datetime.datetime.today().strftime('%Y-%m-%d')
current_date = datetime.datetime.strptime(current_date, '%Y-%m-%d')
Months = diff_month(current_date,start_time)
return True,Months
else:
return False,'Start Date Not Exists'
def check_days_license(months):
currentDirectory = os.path.dirname(os.path.abspath(__file__))
license_path = os.path.join(currentDirectory,'..','lic','license.lic')
if(os.path.isfile(license_path)):
if(validate_key_Pair(license_path,full_key)):
return True,'Valid License'
elif(validate_key_Pair(license_path,oneYear_key)):
if months <= 12:
return True,'Valid License'
else:
return False,'License for AI.ON has expired. Please contact ERS Research for renewal.'
elif(validate_key_Pair(license_path,halfYear_key)):
if months <= 6:
return True,'Valid License'
else:
return False,'License for AI.ON has expired. Please contact ERS Research for renewal.'
elif(validate_key_Pair(license_path,quarter_key)):
if months <= 3:
return True,'Valid License'
else:
return False,'License for AI.ON has expired. Please contact ERS Research for renewal.'
else:
return False,'Invalid License'
else:
        return False,'License does not exist. Please contact ERS Research for renewal.'
def checklicense():
import binascii
license_path = os.path.join(DATA_DIR,'License','license.lic')
if(os.path.isfile(license_path)):
try:
with open(license_path, 'r') as privatefile:
license_key = privatefile.read()
privatefile.close()
encrypted_message = binascii.unhexlify(license_key.encode())
privkey = '''-----BEGIN RSA PRIVATE KEY-----
MIIEqQIBAAKCAQEAhqfNMuYYLdVrePhkO9rU/qT6FgolzI0YyzIJ2OeJE+++JioY
m6nnohQU32iiE0DZlCCLrHJXOOIAz2Op80goX0lxtngyxVUPsiB5CI77sAC7x6K3
anJ0elpnQCC0+xV2ZL5eIMNQHLe+X6wJl/HGWqkUlxKpWr4/kBEB4EisW60OePfh
ntIN4OUJ7iEq+sDdOM5WazJIXeNV1cig4i6057GE3k5ITcQUmw17DZu2+dqkIscc
kaG+t5SF7Qnvt4IY8IeQp2htx3yD+CJCV0u2uKwoSFMGJn3OWdaixC3+eojyMXmf
AWtQEe9NLNNaTCMIvQ8BeItJLQs2Htw3bZNMvwIDAQABAoIBAGGmuRnrYaeDeWAO
CmqZxRMyQybOjyDrRgq9rAR/zJoHp8b3ikcBDTkuBQELWVZLFj7k50XU2cono9zC
cxI5xwVrNqrUOkV+7VYJVJzPTFkT/xnEt+zbOfstKmmIDpdzthtTLuHlomhhHA83
rPFi5a0Dpynz35suEnm6ONxx4ICONa3xkQ51ALm8EEsdJ+qRQhi2HLTF/OVZMxSa
A2DlFd4ChOEbYaN63xVCDxPXe9BfeHd/Rnim9x4xL9i2RL+mhARUy/ZP6LMHIPk7
NxTrGr4TuE/ETg8FZ3cywSnwsMlcplXo8Ar+5ths2XKxbmH1TI/vuQV1r7r0IeqV
F4W/xOkCgYkAiDQy7/WyJWuT+rQ+gOjSUumXgWE3HO+vJAsy05cTZFSs+nUE4ctn
FnvbBIRuClSr3zhcTtjaEaVnZ2OmGfOoAq0cvaXSlxqEs2456WQBf9oPHnvJEV07
AIqzo2EuDvGUh/bkFN3+djRRL9usNNplYA8jU3OQHGdeaS15ZikT+ZkQLXoHE0Oh
vQJ5AP0W9Qouvc9jXRhjNNOWmgt+JiHw/oQts/LUWJ2T4UJ7wKAqGwsmgf0NbF2p
aZ6AbMc7dHzCb52iLJRxlmlkJYzg449t0MgQVxTKQ5viIAdjkRBCIY2++GcYXb6k
6tUnF0Vm2kpffYUb5Lx5JoUE6IhMP0mEv3jKKwKBiCmvoC9lCUL+q+m9JKwbldOe
fqowcMfAa+AiNUohIORCLjbxfa8Fq+VrvtqhFXS/+WJ2Q3o2UHe6Ie24x+uFcVRw
Wy2IBO4ORbMM91iBLRxORvZTeHSCDj7aNKS6Z3hXY9hBLglc8DaJSJfXKdt7RC+k
MnGmGuM2l+Sk8FTeGaj4ucTRZjz1JBkCeQDhNSV1GyShv4xeoCCoy1FmOqmZ+EWy
vqxqv1PfXHDM5SwCGZWY9XokAGbWbWLjvOmO27QLNEV34pCCwxSR0aCsXI2B2rk2
3Xtvr5A7zRqtGIdEDWSoKjAGJSN9+mhQpglKI3zJQ3GBGdIPeEqzgSud5SNHu01a
IaMCgYgyoxtqdWi90iE75/x+uIVGJRdHtWoL2dr8Ixu1bOMjKCR8gjneSRTqI1tA
lbRH5K/jg6iccB/pQmBcIPIubF10Nv/ZQV760WK/h6ue2hOCaBLWT8EQEEfBfnp+
9rfBfNQIQIkBFTfGIHXUUPb9sJgDP1boUxcqxr9bpKUrs1EMkUd+PrvpHIj2
-----END RSA PRIVATE KEY-----
'''
privkey = rsa.PrivateKey.load_pkcs1(privkey,'PEM')
decrypted_message = rsa.decrypt(encrypted_message, privkey)
msg = decrypted_message.decode().split('####')
product = msg[0]
computernameLicense = msg[1]
computername = socket.getfqdn()
licenseValid = False
if product.lower() == 'aion':
if computernameLicense == computername:
uuidlicense = msg[3]
uuid = guid()
if uuidlicense == uuid:
current_date = datetime.now()
license_expiry_date = msg[5]
license_expiry_date = datetime.strptime(license_expiry_date,'%Y-%m-%d %H:%M:%S')
if current_date > license_expiry_date:
return False,'License Expire'
else:
return True,''
return False,'License Error'
except Exception as e:
print(e)
return False,'License Error'
else:
return False,'Generate License'
def generate_record_key(product,version):
computername = socket.getfqdn()
macaddress = getmac.get_mac_address()
license_date = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
try:
user = os.getlogin()
except:
user = 'NA'
uuid = guid()
msg = product+'###'+version+'###'+computername+'###'+macaddress+'###'+user+'###'+sys.platform+'###'+uuid+'###'+license_date
pkeydata='''-----BEGIN RSA PUBLIC KEY-----
MIIBCgKCAQEAm75ZwaepuxGJjU1Slk1+IUO2E49Hy8i9dym5FUaBRyTRH6R+GTF1
kcpd+1QinIZDMIdsmAc95Y8pTufxY30QxCkOhVASitSQWHS/IiWQHmsTJwdr38lq
ZnQQloOt/iPlhcavbxu/yKFzwBmp+nM+ErDTnCBh6EGCGrw1xWF30T2IBpmpWwME
oqZsFV69RzwQAw39KG1KCxi5uscrB62YPgUdlT2b4Yaa90egQhGLLVdnKvhPORiG
T9omCH90Dkm1oMMQ0Y2JBLezgXa/bunSqtTBxEwzlwUAX2JJcanFYrzKy2OLxzwN
RlWUXilZ4R/1RHAgUdNyKbYxZqc24MApoQIDAQAB
-----END RSA PUBLIC KEY-----
'''
pubkey = rsa.PublicKey.load_pkcs1(pkeydata)
encrypted_message = rsa.encrypt(msg.encode(), pubkey)
encrypted_message = binascii.hexlify(encrypted_message).decode()
return(encrypted_message)
def run(cmd):
try:
return subprocess.run(cmd, shell=True, capture_output=True, check=True, encoding="utf-8").stdout.strip()
except Exception as e:
print(e)
return None
def guid():
if sys.platform == 'darwin':
return run(
"ioreg -d2 -c IOPlatformExpertDevice | awk -F\\\\\\" '/IOPlatformUUID/{print $(NF-1)}'",
)
if sys.platform == 'win32' or sys.platform == 'cygwin' or sys.platform == 'msys':
return run('wmic csproduct get uuid').split('\\n')[2].strip()
if sys.platform.startswith('linux'):
return run('cat /var/lib/dbus/machine-id') or \\
run('cat /etc/machine-id')
if sys.platform.startswith('openbsd') or sys.platform.startswith('freebsd'):
return run('cat /etc/hostid') or \\
run('kenv -q smbios.system.uuid')
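# guid() returns a platform-specific machine identifier string: the IOPlatform/SMBIOS UUID
# on macOS/Windows/BSD or the machine-id file contents on Linux. It may return None if the
# probed command fails or the platform is not covered (illustrative note, not exhaustive).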
def updateLicense(licensekey):
license_folder = os.path.join(DATA_DIR,'License')
license_folder = Path(license_folder)
license_folder.mkdir(parents=True, exist_ok=True)
license_file = license_folder/'license.lic'
with open(license_file, "w") as fl:
fl.write(licensekey)
fl.close()
def enterRecord(version):
validLicense,msg = checklicense()
if not validLicense:
key = generate_record_key('AION',version)
msg = {'status':msg,'key':key,'licenseKey':'','link':''}
return validLicense,msg
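# Illustrative end-to-end flow (the version string and license value are assumptions):
# valid, info = enterRecord('4.0')
# if not valid:
#     # share info['key'] with the license issuer, then persist the key they return
#     updateLicense('<hex-encoded-license-string>')
#     valid, reason = checklicense()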
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pandas as pd
import numpy as np
import os
import datetime, time, timeit
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix